llvm/clang/test/CodeGen/cx-complex-range.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=basic -o - | FileCheck %s --check-prefix=BASIC

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -fno-cx-limited-range -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=improved -o - | FileCheck %s --check-prefix=IMPRVD

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=promoted -o - | FileCheck %s --check-prefix=PRMTD

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=full -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 -triple x86_64-windows-pc -complex-range=promoted \
// RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=X86WINPRMTD

// RUN: %clang_cc1 -triple=avr-unknown-unknown -mdouble=32 \
// RUN: -complex-range=promoted -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=AVRFP32

// RUN: %clang_cc1 -triple=avr-unknown-unknown -mdouble=64 \
// RUN: -complex-range=promoted -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=AVRFP64

// Fast math
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=basic -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=BASIC_FAST

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=full -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=FULL_FAST

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -fno-cx-fortran-rules -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=improved -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=IMPRVD_FAST

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=promoted -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=PRMTD_FAST

// strict math mode
// RUN: %clang_cc1 -triple x86_64-windows-pc -complex-range=promoted \
// RUN: -ffp-contract=off -frounding-math -ffp-exception-behavior=strict \
// RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=X86WINPRMTD_STRICT

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=promoted \
// RUN: -ffp-contract=off -frounding-math -ffp-exception-behavior=strict \
// RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=PRMTD_STRICT

// FULL-LABEL: define dso_local <2 x float> @divf(
// FULL-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// FULL-NEXT:  entry:
// FULL-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// FULL-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// FULL-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// FULL-NEXT:    [[CALL:%.*]] = call <2 x float> @__divsc3(float noundef [[A_REAL]], float noundef [[A_IMAG]], float noundef [[B_REAL]], float noundef [[B_IMAG]]) #[[ATTR2:[0-9]+]]
// FULL-NEXT:    store <2 x float> [[CALL]], ptr [[COERCE]], align 4
// FULL-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT:    store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL-NEXT:    store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL-NEXT:    ret <2 x float> [[TMP0]]
//
// BASIC-LABEL: define dso_local <2 x float> @divf(
// BASIC-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// BASIC-NEXT:  entry:
// BASIC-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// BASIC-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// BASIC-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// BASIC-NEXT:    [[TMP0:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[TMP1:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP2:%.*]] = fadd float [[TMP0]], [[TMP1]]
// BASIC-NEXT:    [[TMP3:%.*]] = fmul float [[B_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[TMP4:%.*]] = fmul float [[B_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP5:%.*]] = fadd float [[TMP3]], [[TMP4]]
// BASIC-NEXT:    [[TMP6:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT:    [[TMP7:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP8:%.*]] = fsub float [[TMP6]], [[TMP7]]
// BASIC-NEXT:    [[TMP9:%.*]] = fdiv float [[TMP2]], [[TMP5]]
// BASIC-NEXT:    [[TMP10:%.*]] = fdiv float [[TMP8]], [[TMP5]]
// BASIC-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT:    store float [[TMP9]], ptr [[RETVAL_REALP]], align 4
// BASIC-NEXT:    store float [[TMP10]], ptr [[RETVAL_IMAGP]], align 4
// BASIC-NEXT:    [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC-NEXT:    ret <2 x float> [[TMP11]]
//
// IMPRVD-LABEL: define dso_local <2 x float> @divf(
// IMPRVD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// IMPRVD-NEXT:  entry:
// IMPRVD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// IMPRVD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// IMPRVD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// IMPRVD-NEXT:    [[TMP0:%.*]] = call float @llvm.fabs.f32(float [[B_REAL]])
// IMPRVD-NEXT:    [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[B_IMAG]])
// IMPRVD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt float [[TMP0]], [[TMP1]]
// IMPRVD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD-NEXT:    [[TMP2:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
// IMPRVD-NEXT:    [[TMP3:%.*]] = fmul float [[TMP2]], [[B_IMAG]]
// IMPRVD-NEXT:    [[TMP4:%.*]] = fadd float [[B_REAL]], [[TMP3]]
// IMPRVD-NEXT:    [[TMP5:%.*]] = fmul float [[A_IMAG]], [[TMP2]]
// IMPRVD-NEXT:    [[TMP6:%.*]] = fadd float [[A_REAL]], [[TMP5]]
// IMPRVD-NEXT:    [[TMP7:%.*]] = fdiv float [[TMP6]], [[TMP4]]
// IMPRVD-NEXT:    [[TMP8:%.*]] = fmul float [[A_REAL]], [[TMP2]]
// IMPRVD-NEXT:    [[TMP9:%.*]] = fsub float [[A_IMAG]], [[TMP8]]
// IMPRVD-NEXT:    [[TMP10:%.*]] = fdiv float [[TMP9]], [[TMP4]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD-NEXT:    [[TMP11:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
// IMPRVD-NEXT:    [[TMP12:%.*]] = fmul float [[TMP11]], [[B_REAL]]
// IMPRVD-NEXT:    [[TMP13:%.*]] = fadd float [[B_IMAG]], [[TMP12]]
// IMPRVD-NEXT:    [[TMP14:%.*]] = fmul float [[A_REAL]], [[TMP11]]
// IMPRVD-NEXT:    [[TMP15:%.*]] = fadd float [[TMP14]], [[A_IMAG]]
// IMPRVD-NEXT:    [[TMP16:%.*]] = fdiv float [[TMP15]], [[TMP13]]
// IMPRVD-NEXT:    [[TMP17:%.*]] = fmul float [[A_IMAG]], [[TMP11]]
// IMPRVD-NEXT:    [[TMP18:%.*]] = fsub float [[TMP17]], [[A_REAL]]
// IMPRVD-NEXT:    [[TMP19:%.*]] = fdiv float [[TMP18]], [[TMP13]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD:       complex_div:
// IMPRVD-NEXT:    [[TMP20:%.*]] = phi float [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[TMP21:%.*]] = phi float [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT:    store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// IMPRVD-NEXT:    store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD-NEXT:    [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD-NEXT:    ret <2 x float> [[TMP22]]
//
// PRMTD-LABEL: define dso_local <2 x float> @divf(
// PRMTD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// PRMTD-NEXT:  entry:
// PRMTD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD-NEXT:    [[EXT:%.*]] = fpext float [[A_REAL]] to double
// PRMTD-NEXT:    [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD-NEXT:    [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// PRMTD-NEXT:    [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// PRMTD-NEXT:    [[TMP0:%.*]] = fmul double [[EXT]], [[EXT2]]
// PRMTD-NEXT:    [[TMP1:%.*]] = fmul double [[EXT1]], [[EXT3]]
// PRMTD-NEXT:    [[TMP2:%.*]] = fadd double [[TMP0]], [[TMP1]]
// PRMTD-NEXT:    [[TMP3:%.*]] = fmul double [[EXT2]], [[EXT2]]
// PRMTD-NEXT:    [[TMP4:%.*]] = fmul double [[EXT3]], [[EXT3]]
// PRMTD-NEXT:    [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
// PRMTD-NEXT:    [[TMP6:%.*]] = fmul double [[EXT1]], [[EXT2]]
// PRMTD-NEXT:    [[TMP7:%.*]] = fmul double [[EXT]], [[EXT3]]
// PRMTD-NEXT:    [[TMP8:%.*]] = fsub double [[TMP6]], [[TMP7]]
// PRMTD-NEXT:    [[TMP9:%.*]] = fdiv double [[TMP2]], [[TMP5]]
// PRMTD-NEXT:    [[TMP10:%.*]] = fdiv double [[TMP8]], [[TMP5]]
// PRMTD-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP9]] to float
// PRMTD-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc double [[TMP10]] to float
// PRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD-NEXT:    store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD-NEXT:    [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD-NEXT:    ret <2 x float> [[TMP11]]
//
// X86WINPRMTD-LABEL: define dso_local i64 @divf(
// X86WINPRMTD-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// X86WINPRMTD-NEXT:  entry:
// X86WINPRMTD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD-NEXT:    store i64 [[B_COERCE]], ptr [[B]], align 4
// X86WINPRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[EXT:%.*]] = fpext float [[A_REAL]] to double
// X86WINPRMTD-NEXT:    [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// X86WINPRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// X86WINPRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// X86WINPRMTD-NEXT:    [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// X86WINPRMTD-NEXT:    [[TMP0:%.*]] = fmul double [[EXT]], [[EXT2]]
// X86WINPRMTD-NEXT:    [[TMP1:%.*]] = fmul double [[EXT1]], [[EXT3]]
// X86WINPRMTD-NEXT:    [[TMP2:%.*]] = fadd double [[TMP0]], [[TMP1]]
// X86WINPRMTD-NEXT:    [[TMP3:%.*]] = fmul double [[EXT2]], [[EXT2]]
// X86WINPRMTD-NEXT:    [[TMP4:%.*]] = fmul double [[EXT3]], [[EXT3]]
// X86WINPRMTD-NEXT:    [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
// X86WINPRMTD-NEXT:    [[TMP6:%.*]] = fmul double [[EXT1]], [[EXT2]]
// X86WINPRMTD-NEXT:    [[TMP7:%.*]] = fmul double [[EXT]], [[EXT3]]
// X86WINPRMTD-NEXT:    [[TMP8:%.*]] = fsub double [[TMP6]], [[TMP7]]
// X86WINPRMTD-NEXT:    [[TMP9:%.*]] = fdiv double [[TMP2]], [[TMP5]]
// X86WINPRMTD-NEXT:    [[TMP10:%.*]] = fdiv double [[TMP8]], [[TMP5]]
// X86WINPRMTD-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP9]] to float
// X86WINPRMTD-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc double [[TMP10]] to float
// X86WINPRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD-NEXT:    store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[TMP11:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD-NEXT:    ret i64 [[TMP11]]
//
// AVRFP32-LABEL: define dso_local { float, float } @divf(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0:[0-9]+]] {
// AVRFP32-NEXT:  entry:
// AVRFP32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP4:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_REAL]])
// AVRFP32-NEXT:    [[TMP5:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_IMAG]])
// AVRFP32-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt float [[TMP4]], [[TMP5]]
// AVRFP32-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP32:       abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP32-NEXT:    [[TMP6:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
// AVRFP32-NEXT:    [[TMP7:%.*]] = fmul float [[TMP6]], [[B_IMAG]]
// AVRFP32-NEXT:    [[TMP8:%.*]] = fadd float [[B_REAL]], [[TMP7]]
// AVRFP32-NEXT:    [[TMP9:%.*]] = fmul float [[A_IMAG]], [[TMP6]]
// AVRFP32-NEXT:    [[TMP10:%.*]] = fadd float [[A_REAL]], [[TMP9]]
// AVRFP32-NEXT:    [[TMP11:%.*]] = fdiv float [[TMP10]], [[TMP8]]
// AVRFP32-NEXT:    [[TMP12:%.*]] = fmul float [[A_REAL]], [[TMP6]]
// AVRFP32-NEXT:    [[TMP13:%.*]] = fsub float [[A_IMAG]], [[TMP12]]
// AVRFP32-NEXT:    [[TMP14:%.*]] = fdiv float [[TMP13]], [[TMP8]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV:%.*]]
// AVRFP32:       abs_rhsr_less_than_abs_rhsi:
// AVRFP32-NEXT:    [[TMP15:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
// AVRFP32-NEXT:    [[TMP16:%.*]] = fmul float [[TMP15]], [[B_REAL]]
// AVRFP32-NEXT:    [[TMP17:%.*]] = fadd float [[B_IMAG]], [[TMP16]]
// AVRFP32-NEXT:    [[TMP18:%.*]] = fmul float [[A_REAL]], [[TMP15]]
// AVRFP32-NEXT:    [[TMP19:%.*]] = fadd float [[TMP18]], [[A_IMAG]]
// AVRFP32-NEXT:    [[TMP20:%.*]] = fdiv float [[TMP19]], [[TMP17]]
// AVRFP32-NEXT:    [[TMP21:%.*]] = fmul float [[A_IMAG]], [[TMP15]]
// AVRFP32-NEXT:    [[TMP22:%.*]] = fsub float [[TMP21]], [[A_REAL]]
// AVRFP32-NEXT:    [[TMP23:%.*]] = fdiv float [[TMP22]], [[TMP17]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV]]
// AVRFP32:       complex_div:
// AVRFP32-NEXT:    [[TMP24:%.*]] = phi float [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[TMP25:%.*]] = phi float [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[TMP24]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT:    store float [[TMP25]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP26:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT:    ret { float, float } [[TMP26]]
//
// AVRFP64-LABEL: define dso_local { float, float } @divf(
// AVRFP64-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0:[0-9]+]] {
// AVRFP64-NEXT:  entry:
// AVRFP64-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[B:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT:    [[EXT:%.*]] = fpext float [[A_REAL]] to double
// AVRFP64-NEXT:    [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// AVRFP64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT:    [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// AVRFP64-NEXT:    [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// AVRFP64-NEXT:    [[TMP4:%.*]] = fmul double [[EXT]], [[EXT2]]
// AVRFP64-NEXT:    [[TMP5:%.*]] = fmul double [[EXT1]], [[EXT3]]
// AVRFP64-NEXT:    [[TMP6:%.*]] = fadd double [[TMP4]], [[TMP5]]
// AVRFP64-NEXT:    [[TMP7:%.*]] = fmul double [[EXT2]], [[EXT2]]
// AVRFP64-NEXT:    [[TMP8:%.*]] = fmul double [[EXT3]], [[EXT3]]
// AVRFP64-NEXT:    [[TMP9:%.*]] = fadd double [[TMP7]], [[TMP8]]
// AVRFP64-NEXT:    [[TMP10:%.*]] = fmul double [[EXT1]], [[EXT2]]
// AVRFP64-NEXT:    [[TMP11:%.*]] = fmul double [[EXT]], [[EXT3]]
// AVRFP64-NEXT:    [[TMP12:%.*]] = fsub double [[TMP10]], [[TMP11]]
// AVRFP64-NEXT:    [[TMP13:%.*]] = fdiv double [[TMP6]], [[TMP9]]
// AVRFP64-NEXT:    [[TMP14:%.*]] = fdiv double [[TMP12]], [[TMP9]]
// AVRFP64-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP13]] to float
// AVRFP64-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc double [[TMP14]] to float
// AVRFP64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 1
// AVRFP64-NEXT:    store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP64-NEXT:    [[TMP15:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP64-NEXT:    ret { float, float } [[TMP15]]
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// BASIC_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// BASIC_FAST-NEXT:  entry:
// BASIC_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP0]], [[TMP1]]
// BASIC_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[B_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[B_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP3]], [[TMP4]]
// BASIC_FAST-NEXT:    [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP6]], [[TMP7]]
// BASIC_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP2]], [[TMP5]]
// BASIC_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP8]], [[TMP5]]
// BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT:    store float [[TMP9]], ptr [[RETVAL_REALP]], align 4
// BASIC_FAST-NEXT:    store float [[TMP10]], ptr [[RETVAL_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC_FAST-NEXT:    ret <2 x float> [[TMP11]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// FULL_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// FULL_FAST-NEXT:  entry:
// FULL_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__divsc3(float noundef nofpclass(nan inf) [[A_REAL]], float noundef nofpclass(nan inf) [[A_IMAG]], float noundef nofpclass(nan inf) [[B_REAL]], float noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2:[0-9]+]]
// FULL_FAST-NEXT:    store <2 x float> [[CALL]], ptr [[COERCE]], align 4
// FULL_FAST-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL_FAST-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL_FAST-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL_FAST-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT:    store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL_FAST-NEXT:    store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL_FAST-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL_FAST-NEXT:    ret <2 x float> [[TMP0]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// IMPRVD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// IMPRVD_FAST-NEXT:  entry:
// IMPRVD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[B_REAL]])
// IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[B_IMAG]])
// IMPRVD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt float [[TMP0]], [[TMP1]]
// IMPRVD_FAST-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[B_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP2]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[B_REAL]], [[TMP3]]
// IMPRVD_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP2]]
// IMPRVD_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP5]]
// IMPRVD_FAST-NEXT:    [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP6]], [[TMP4]]
// IMPRVD_FAST-NEXT:    [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP2]]
// IMPRVD_FAST-NEXT:    [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP8]]
// IMPRVD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP9]], [[TMP4]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[B_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP11]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[B_IMAG]], [[TMP12]]
// IMPRVD_FAST-NEXT:    [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP11]]
// IMPRVD_FAST-NEXT:    [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP14]], [[A_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP15]], [[TMP13]]
// IMPRVD_FAST-NEXT:    [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP11]]
// IMPRVD_FAST-NEXT:    [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP17]], [[A_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP18]], [[TMP13]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD_FAST:       complex_div:
// IMPRVD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// IMPRVD_FAST-NEXT:    store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD_FAST-NEXT:    ret <2 x float> [[TMP22]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// PRMTD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// PRMTD_FAST-NEXT:  entry:
// PRMTD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext float [[A_REAL]] to double
// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// PRMTD_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT2]]
// PRMTD_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT1]], [[EXT3]]
// PRMTD_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP0]], [[TMP1]]
// PRMTD_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT2]], [[EXT2]]
// PRMTD_FAST-NEXT:    [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT3]], [[EXT3]]
// PRMTD_FAST-NEXT:    [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP3]], [[TMP4]]
// PRMTD_FAST-NEXT:    [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT1]], [[EXT2]]
// PRMTD_FAST-NEXT:    [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT3]]
// PRMTD_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP6]], [[TMP7]]
// PRMTD_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP2]], [[TMP5]]
// PRMTD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP8]], [[TMP5]]
// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP9]] to float
// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc double [[TMP10]] to float
// PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_FAST-NEXT:    store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_FAST-NEXT:    ret <2 x float> [[TMP11]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local i64 @divf(
// X86WINPRMTD_STRICT-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// X86WINPRMTD_STRICT-NEXT:  entry:
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD_STRICT-NEXT:    store i64 [[B_COERCE]], ptr [[B]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR3:[0-9]+]]
// X86WINPRMTD_STRICT-NEXT:    [[EXT1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[EXT2:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_REAL]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[EXT3:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP0:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP0]], double [[TMP1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT2]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT3]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP3]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP6]], double [[TMP7]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP2]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP8]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[UNPROMOTION4:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[TMP11:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD_STRICT-NEXT:    ret i64 [[TMP11]]
//
// PRMTD_STRICT-LABEL: define dso_local <2 x float> @divf(
// PRMTD_STRICT-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// PRMTD_STRICT-NEXT:  entry:
// PRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_STRICT-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR4:[0-9]+]]
// PRMTD_STRICT-NEXT:    [[EXT1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[EXT2:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[EXT3:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP0:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP0]], double [[TMP1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT2]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT3]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP3]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP6]], double [[TMP7]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP2]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP8]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[UNPROMOTION4:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_STRICT-NEXT:    store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_STRICT-NEXT:    ret <2 x float> [[TMP11]]
//
_Complex float divf(_Complex float a, _Complex float b) {
  return a / b;
}

// FULL-LABEL: define dso_local <2 x float> @mulf(
// FULL-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// FULL-NEXT:  entry:
// FULL-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// FULL-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// FULL-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// FULL-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// FULL-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// FULL-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// FULL-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// FULL-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// FULL-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// FULL-NEXT:    [[ISNAN_CMP:%.*]] = fcmp uno float [[MUL_R]], [[MUL_R]]
// FULL-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2:![0-9]+]]
// FULL:       complex_mul_imag_nan:
// FULL-NEXT:    [[ISNAN_CMP1:%.*]] = fcmp uno float [[MUL_I]], [[MUL_I]]
// FULL-NEXT:    br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL:       complex_mul_libcall:
// FULL-NEXT:    [[CALL:%.*]] = call <2 x float> @__mulsc3(float noundef [[A_REAL]], float noundef [[A_IMAG]], float noundef [[B_REAL]], float noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT:    store <2 x float> [[CALL]], ptr [[COERCE]], align 4
// FULL-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL-NEXT:    br label [[COMPLEX_MUL_CONT]]
// FULL:       complex_mul_cont:
// FULL-NEXT:    [[REAL_MUL_PHI:%.*]] = phi float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT:    store float [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 4
// FULL-NEXT:    store float [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 4
// FULL-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL-NEXT:    ret <2 x float> [[TMP0]]
//
// BASIC-LABEL: define dso_local <2 x float> @mulf(
// BASIC-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// BASIC-NEXT:  entry:
// BASIC-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// BASIC-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// BASIC-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// BASIC-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// BASIC-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// BASIC-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// BASIC-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// BASIC-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC-NEXT:    ret <2 x float> [[TMP0]]
//
// IMPRVD-LABEL: define dso_local <2 x float> @mulf(
// IMPRVD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// IMPRVD-NEXT:  entry:
// IMPRVD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// IMPRVD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// IMPRVD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// IMPRVD-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// IMPRVD-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// IMPRVD-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// IMPRVD-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// IMPRVD-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// IMPRVD-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// IMPRVD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// IMPRVD-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD-NEXT:    ret <2 x float> [[TMP0]]
//
// PRMTD-LABEL: define dso_local <2 x float> @mulf(
// PRMTD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD-NEXT:  entry:
// PRMTD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// PRMTD-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// PRMTD-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// PRMTD-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// PRMTD-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// PRMTD-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// PRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// PRMTD-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD-NEXT:    ret <2 x float> [[TMP0]]
//
// X86WINPRMTD-LABEL: define dso_local i64 @mulf(
// X86WINPRMTD-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT:  entry:
// X86WINPRMTD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD-NEXT:    store i64 [[B_COERCE]], ptr [[B]], align 4
// X86WINPRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// X86WINPRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// X86WINPRMTD-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// X86WINPRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD-NEXT:    ret i64 [[TMP0]]
//
// AVRFP32-LABEL: define dso_local { float, float } @mulf(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT:  entry:
// AVRFP32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// AVRFP32-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// AVRFP32-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// AVRFP32-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// AVRFP32-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// AVRFP32-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// AVRFP32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT:    ret { float, float } [[TMP4]]
//
// AVRFP64-LABEL: define dso_local { float, float } @mulf(
// AVRFP64-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT:  entry:
// AVRFP64-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[B:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// AVRFP64-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// AVRFP64-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// AVRFP64-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// AVRFP64-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// AVRFP64-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// AVRFP64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
// AVRFP64-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP64-NEXT:    [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP64-NEXT:    ret { float, float } [[TMP4]]
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
// BASIC_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
// BASIC_FAST-NEXT:  entry:
// BASIC_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
// BASIC_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
// BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// BASIC_FAST-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC_FAST-NEXT:    ret <2 x float> [[TMP0]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
// FULL_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
// FULL_FAST-NEXT:  entry:
// FULL_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// FULL_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
// FULL_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
// FULL_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
// FULL_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
// FULL_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
// FULL_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
// FULL_FAST-NEXT:    [[ISNAN_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno float [[MUL_R]], [[MUL_R]]
// FULL_FAST-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2:![0-9]+]]
// FULL_FAST:       complex_mul_imag_nan:
// FULL_FAST-NEXT:    [[ISNAN_CMP1:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno float [[MUL_I]], [[MUL_I]]
// FULL_FAST-NEXT:    br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL_FAST:       complex_mul_libcall:
// FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__mulsc3(float noundef nofpclass(nan inf) [[A_REAL]], float noundef nofpclass(nan inf) [[A_IMAG]], float noundef nofpclass(nan inf) [[B_REAL]], float noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT:    store <2 x float> [[CALL]], ptr [[COERCE]], align 4
// FULL_FAST-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL_FAST-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL_FAST-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL_FAST-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL_FAST-NEXT:    br label [[COMPLEX_MUL_CONT]]
// FULL_FAST:       complex_mul_cont:
// FULL_FAST-NEXT:    [[REAL_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT:    store float [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 4
// FULL_FAST-NEXT:    store float [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 4
// FULL_FAST-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL_FAST-NEXT:    ret <2 x float> [[TMP0]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
// IMPRVD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
// IMPRVD_FAST-NEXT:  entry:
// IMPRVD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
// IMPRVD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
// IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// IMPRVD_FAST-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD_FAST-NEXT:    ret <2 x float> [[TMP0]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
// PRMTD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD_FAST-NEXT:  entry:
// PRMTD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_FAST-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
// PRMTD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
// PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// PRMTD_FAST-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_FAST-NEXT:    ret <2 x float> [[TMP0]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local i64 @mulf(
// X86WINPRMTD_STRICT-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT:  entry:
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD_STRICT-NEXT:    store i64 [[B_COERCE]], ptr [[B]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[MUL_AC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_BD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_AD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_BC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_R:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[MUL_AC]], float [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_I:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[MUL_AD]], float [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD_STRICT-NEXT:    ret i64 [[TMP0]]
//
// PRMTD_STRICT-LABEL: define dso_local <2 x float> @mulf(
// PRMTD_STRICT-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD_STRICT-NEXT:  entry:
// PRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_STRICT-NEXT:    store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[MUL_AC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_BD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_AD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_BC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_R:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[MUL_AC]], float [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_I:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[MUL_AD]], float [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// PRMTD_STRICT-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_STRICT-NEXT:    ret <2 x float> [[TMP0]]
//
_Complex float mulf(_Complex float a, _Complex float b) {
  return a * b;
}

// FULL-LABEL: define dso_local { double, double } @divd(
// FULL-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// FULL-NEXT:  entry:
// FULL-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// FULL-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// FULL-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// FULL-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// FULL-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// FULL-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// FULL-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// FULL-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// FULL-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// FULL-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// FULL-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// FULL-NEXT:    [[CALL:%.*]] = call { double, double } @__divdc3(double noundef [[A_REAL]], double noundef [[A_IMAG]], double noundef [[B_REAL]], double noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT:    [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
// FULL-NEXT:    [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
// FULL-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT:    store double [[TMP4]], ptr [[RETVAL_REALP]], align 8
// FULL-NEXT:    store double [[TMP5]], ptr [[RETVAL_IMAGP]], align 8
// FULL-NEXT:    [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// FULL-NEXT:    ret { double, double } [[TMP6]]
//
// BASIC-LABEL: define dso_local { double, double } @divd(
// BASIC-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// BASIC-NEXT:  entry:
// BASIC-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// BASIC-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// BASIC-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// BASIC-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// BASIC-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// BASIC-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// BASIC-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// BASIC-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// BASIC-NEXT:    [[TMP4:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[TMP5:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP6:%.*]] = fadd double [[TMP4]], [[TMP5]]
// BASIC-NEXT:    [[TMP7:%.*]] = fmul double [[B_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[TMP8:%.*]] = fmul double [[B_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP9:%.*]] = fadd double [[TMP7]], [[TMP8]]
// BASIC-NEXT:    [[TMP10:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT:    [[TMP11:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP12:%.*]] = fsub double [[TMP10]], [[TMP11]]
// BASIC-NEXT:    [[TMP13:%.*]] = fdiv double [[TMP6]], [[TMP9]]
// BASIC-NEXT:    [[TMP14:%.*]] = fdiv double [[TMP12]], [[TMP9]]
// BASIC-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT:    store double [[TMP13]], ptr [[RETVAL_REALP]], align 8
// BASIC-NEXT:    store double [[TMP14]], ptr [[RETVAL_IMAGP]], align 8
// BASIC-NEXT:    [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// BASIC-NEXT:    ret { double, double } [[TMP15]]
//
// IMPRVD-LABEL: define dso_local { double, double } @divd(
// IMPRVD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2:[0-9]+]] {
// IMPRVD-NEXT:  entry:
// IMPRVD-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// IMPRVD-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// IMPRVD-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// IMPRVD-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// IMPRVD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// IMPRVD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// IMPRVD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// IMPRVD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// IMPRVD-NEXT:    [[TMP4:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
// IMPRVD-NEXT:    [[TMP5:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
// IMPRVD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt double [[TMP4]], [[TMP5]]
// IMPRVD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD-NEXT:    [[TMP6:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// IMPRVD-NEXT:    [[TMP7:%.*]] = fmul double [[TMP6]], [[B_IMAG]]
// IMPRVD-NEXT:    [[TMP8:%.*]] = fadd double [[B_REAL]], [[TMP7]]
// IMPRVD-NEXT:    [[TMP9:%.*]] = fmul double [[A_IMAG]], [[TMP6]]
// IMPRVD-NEXT:    [[TMP10:%.*]] = fadd double [[A_REAL]], [[TMP9]]
// IMPRVD-NEXT:    [[TMP11:%.*]] = fdiv double [[TMP10]], [[TMP8]]
// IMPRVD-NEXT:    [[TMP12:%.*]] = fmul double [[A_REAL]], [[TMP6]]
// IMPRVD-NEXT:    [[TMP13:%.*]] = fsub double [[A_IMAG]], [[TMP12]]
// IMPRVD-NEXT:    [[TMP14:%.*]] = fdiv double [[TMP13]], [[TMP8]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD-NEXT:    [[TMP15:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// IMPRVD-NEXT:    [[TMP16:%.*]] = fmul double [[TMP15]], [[B_REAL]]
// IMPRVD-NEXT:    [[TMP17:%.*]] = fadd double [[B_IMAG]], [[TMP16]]
// IMPRVD-NEXT:    [[TMP18:%.*]] = fmul double [[A_REAL]], [[TMP15]]
// IMPRVD-NEXT:    [[TMP19:%.*]] = fadd double [[TMP18]], [[A_IMAG]]
// IMPRVD-NEXT:    [[TMP20:%.*]] = fdiv double [[TMP19]], [[TMP17]]
// IMPRVD-NEXT:    [[TMP21:%.*]] = fmul double [[A_IMAG]], [[TMP15]]
// IMPRVD-NEXT:    [[TMP22:%.*]] = fsub double [[TMP21]], [[A_REAL]]
// IMPRVD-NEXT:    [[TMP23:%.*]] = fdiv double [[TMP22]], [[TMP17]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD:       complex_div:
// IMPRVD-NEXT:    [[TMP24:%.*]] = phi double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[TMP25:%.*]] = phi double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT:    store double [[TMP24]], ptr [[RETVAL_REALP]], align 8
// IMPRVD-NEXT:    store double [[TMP25]], ptr [[RETVAL_IMAGP]], align 8
// IMPRVD-NEXT:    [[TMP26:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// IMPRVD-NEXT:    ret { double, double } [[TMP26]]
//
// PRMTD-LABEL: define dso_local { double, double } @divd(
// PRMTD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// PRMTD-NEXT:  entry:
// PRMTD-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD-NEXT:    [[EXT:%.*]] = fpext double [[A_REAL]] to x86_fp80
// PRMTD-NEXT:    [[EXT1:%.*]] = fpext double [[A_IMAG]] to x86_fp80
// PRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD-NEXT:    [[EXT2:%.*]] = fpext double [[B_REAL]] to x86_fp80
// PRMTD-NEXT:    [[EXT3:%.*]] = fpext double [[B_IMAG]] to x86_fp80
// PRMTD-NEXT:    [[TMP4:%.*]] = fmul x86_fp80 [[EXT]], [[EXT2]]
// PRMTD-NEXT:    [[TMP5:%.*]] = fmul x86_fp80 [[EXT1]], [[EXT3]]
// PRMTD-NEXT:    [[TMP6:%.*]] = fadd x86_fp80 [[TMP4]], [[TMP5]]
// PRMTD-NEXT:    [[TMP7:%.*]] = fmul x86_fp80 [[EXT2]], [[EXT2]]
// PRMTD-NEXT:    [[TMP8:%.*]] = fmul x86_fp80 [[EXT3]], [[EXT3]]
// PRMTD-NEXT:    [[TMP9:%.*]] = fadd x86_fp80 [[TMP7]], [[TMP8]]
// PRMTD-NEXT:    [[TMP10:%.*]] = fmul x86_fp80 [[EXT1]], [[EXT2]]
// PRMTD-NEXT:    [[TMP11:%.*]] = fmul x86_fp80 [[EXT]], [[EXT3]]
// PRMTD-NEXT:    [[TMP12:%.*]] = fsub x86_fp80 [[TMP10]], [[TMP11]]
// PRMTD-NEXT:    [[TMP13:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP9]]
// PRMTD-NEXT:    [[TMP14:%.*]] = fdiv x86_fp80 [[TMP12]], [[TMP9]]
// PRMTD-NEXT:    [[UNPROMOTION:%.*]] = fptrunc x86_fp80 [[TMP13]] to double
// PRMTD-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc x86_fp80 [[TMP14]] to double
// PRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT:    store double [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 8
// PRMTD-NEXT:    store double [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD-NEXT:    [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD-NEXT:    ret { double, double } [[TMP15]]
//
// X86WINPRMTD-LABEL: define dso_local void @divd(
// X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT:  entry:
// X86WINPRMTD-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
// X86WINPRMTD-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
// X86WINPRMTD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt double [[TMP0]], [[TMP1]]
// X86WINPRMTD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD:       abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD-NEXT:    [[TMP2:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[TMP3:%.*]] = fmul double [[TMP2]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[TMP4:%.*]] = fadd double [[B_REAL]], [[TMP3]]
// X86WINPRMTD-NEXT:    [[TMP5:%.*]] = fmul double [[A_IMAG]], [[TMP2]]
// X86WINPRMTD-NEXT:    [[TMP6:%.*]] = fadd double [[A_REAL]], [[TMP5]]
// X86WINPRMTD-NEXT:    [[TMP7:%.*]] = fdiv double [[TMP6]], [[TMP4]]
// X86WINPRMTD-NEXT:    [[TMP8:%.*]] = fmul double [[A_REAL]], [[TMP2]]
// X86WINPRMTD-NEXT:    [[TMP9:%.*]] = fsub double [[A_IMAG]], [[TMP8]]
// X86WINPRMTD-NEXT:    [[TMP10:%.*]] = fdiv double [[TMP9]], [[TMP4]]
// X86WINPRMTD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD:       abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD-NEXT:    [[TMP11:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[TMP12:%.*]] = fmul double [[TMP11]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[TMP13:%.*]] = fadd double [[B_IMAG]], [[TMP12]]
// X86WINPRMTD-NEXT:    [[TMP14:%.*]] = fmul double [[A_REAL]], [[TMP11]]
// X86WINPRMTD-NEXT:    [[TMP15:%.*]] = fadd double [[TMP14]], [[A_IMAG]]
// X86WINPRMTD-NEXT:    [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP13]]
// X86WINPRMTD-NEXT:    [[TMP17:%.*]] = fmul double [[A_IMAG]], [[TMP11]]
// X86WINPRMTD-NEXT:    [[TMP18:%.*]] = fsub double [[TMP17]], [[A_REAL]]
// X86WINPRMTD-NEXT:    [[TMP19:%.*]] = fdiv double [[TMP18]], [[TMP13]]
// X86WINPRMTD-NEXT:    br label [[COMPLEX_DIV]]
// X86WINPRMTD:       complex_div:
// X86WINPRMTD-NEXT:    [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT:    [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD-NEXT:    store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD-NEXT:    ret void
//
// AVRFP32-LABEL: define dso_local { float, float } @divd(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT:  entry:
// AVRFP32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// AVRFP32-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// AVRFP32-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 4
// AVRFP32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 4
// AVRFP32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 4
// AVRFP32-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 4
// AVRFP32-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// AVRFP32-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// AVRFP32-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// AVRFP32-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// AVRFP32-NEXT:    [[TMP4:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_REAL]])
// AVRFP32-NEXT:    [[TMP5:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_IMAG]])
// AVRFP32-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt float [[TMP4]], [[TMP5]]
// AVRFP32-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP32:       abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP32-NEXT:    [[TMP6:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
// AVRFP32-NEXT:    [[TMP7:%.*]] = fmul float [[TMP6]], [[B_IMAG]]
// AVRFP32-NEXT:    [[TMP8:%.*]] = fadd float [[B_REAL]], [[TMP7]]
// AVRFP32-NEXT:    [[TMP9:%.*]] = fmul float [[A_IMAG]], [[TMP6]]
// AVRFP32-NEXT:    [[TMP10:%.*]] = fadd float [[A_REAL]], [[TMP9]]
// AVRFP32-NEXT:    [[TMP11:%.*]] = fdiv float [[TMP10]], [[TMP8]]
// AVRFP32-NEXT:    [[TMP12:%.*]] = fmul float [[A_REAL]], [[TMP6]]
// AVRFP32-NEXT:    [[TMP13:%.*]] = fsub float [[A_IMAG]], [[TMP12]]
// AVRFP32-NEXT:    [[TMP14:%.*]] = fdiv float [[TMP13]], [[TMP8]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV:%.*]]
// AVRFP32:       abs_rhsr_less_than_abs_rhsi:
// AVRFP32-NEXT:    [[TMP15:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
// AVRFP32-NEXT:    [[TMP16:%.*]] = fmul float [[TMP15]], [[B_REAL]]
// AVRFP32-NEXT:    [[TMP17:%.*]] = fadd float [[B_IMAG]], [[TMP16]]
// AVRFP32-NEXT:    [[TMP18:%.*]] = fmul float [[A_REAL]], [[TMP15]]
// AVRFP32-NEXT:    [[TMP19:%.*]] = fadd float [[TMP18]], [[A_IMAG]]
// AVRFP32-NEXT:    [[TMP20:%.*]] = fdiv float [[TMP19]], [[TMP17]]
// AVRFP32-NEXT:    [[TMP21:%.*]] = fmul float [[A_IMAG]], [[TMP15]]
// AVRFP32-NEXT:    [[TMP22:%.*]] = fsub float [[TMP21]], [[A_REAL]]
// AVRFP32-NEXT:    [[TMP23:%.*]] = fdiv float [[TMP22]], [[TMP17]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV]]
// AVRFP32:       complex_div:
// AVRFP32-NEXT:    [[TMP24:%.*]] = phi float [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[TMP25:%.*]] = phi float [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[TMP24]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT:    store float [[TMP25]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP26:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT:    ret { float, float } [[TMP26]]
//
// AVRFP64-LABEL: define dso_local void @divd(
// AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT:  entry:
// AVRFP64-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// AVRFP64-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// AVRFP64-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// AVRFP64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// AVRFP64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// AVRFP64-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// AVRFP64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// AVRFP64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// AVRFP64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// AVRFP64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// AVRFP64-NEXT:    [[TMP4:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_REAL]])
// AVRFP64-NEXT:    [[TMP5:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_IMAG]])
// AVRFP64-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt double [[TMP4]], [[TMP5]]
// AVRFP64-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP64:       abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP64-NEXT:    [[TMP6:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// AVRFP64-NEXT:    [[TMP7:%.*]] = fmul double [[TMP6]], [[B_IMAG]]
// AVRFP64-NEXT:    [[TMP8:%.*]] = fadd double [[B_REAL]], [[TMP7]]
// AVRFP64-NEXT:    [[TMP9:%.*]] = fmul double [[A_IMAG]], [[TMP6]]
// AVRFP64-NEXT:    [[TMP10:%.*]] = fadd double [[A_REAL]], [[TMP9]]
// AVRFP64-NEXT:    [[TMP11:%.*]] = fdiv double [[TMP10]], [[TMP8]]
// AVRFP64-NEXT:    [[TMP12:%.*]] = fmul double [[A_REAL]], [[TMP6]]
// AVRFP64-NEXT:    [[TMP13:%.*]] = fsub double [[A_IMAG]], [[TMP12]]
// AVRFP64-NEXT:    [[TMP14:%.*]] = fdiv double [[TMP13]], [[TMP8]]
// AVRFP64-NEXT:    br label [[COMPLEX_DIV:%.*]]
// AVRFP64:       abs_rhsr_less_than_abs_rhsi:
// AVRFP64-NEXT:    [[TMP15:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// AVRFP64-NEXT:    [[TMP16:%.*]] = fmul double [[TMP15]], [[B_REAL]]
// AVRFP64-NEXT:    [[TMP17:%.*]] = fadd double [[B_IMAG]], [[TMP16]]
// AVRFP64-NEXT:    [[TMP18:%.*]] = fmul double [[A_REAL]], [[TMP15]]
// AVRFP64-NEXT:    [[TMP19:%.*]] = fadd double [[TMP18]], [[A_IMAG]]
// AVRFP64-NEXT:    [[TMP20:%.*]] = fdiv double [[TMP19]], [[TMP17]]
// AVRFP64-NEXT:    [[TMP21:%.*]] = fmul double [[A_IMAG]], [[TMP15]]
// AVRFP64-NEXT:    [[TMP22:%.*]] = fsub double [[TMP21]], [[A_REAL]]
// AVRFP64-NEXT:    [[TMP23:%.*]] = fdiv double [[TMP22]], [[TMP17]]
// AVRFP64-NEXT:    br label [[COMPLEX_DIV]]
// AVRFP64:       complex_div:
// AVRFP64-NEXT:    [[TMP24:%.*]] = phi double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT:    [[TMP25:%.*]] = phi double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[TMP24]], ptr [[AGG_RESULT_REALP]], align 1
// AVRFP64-NEXT:    store double [[TMP25]], ptr [[AGG_RESULT_IMAGP]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
// AVRFP64-NEXT:    ret void
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// BASIC_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// BASIC_FAST-NEXT:  entry:
// BASIC_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// BASIC_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// BASIC_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// BASIC_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// BASIC_FAST-NEXT:    [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP4]], [[TMP5]]
// BASIC_FAST-NEXT:    [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[B_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[B_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP9:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP7]], [[TMP8]]
// BASIC_FAST-NEXT:    [[TMP10:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP11:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP12:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP10]], [[TMP11]]
// BASIC_FAST-NEXT:    [[TMP13:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP6]], [[TMP9]]
// BASIC_FAST-NEXT:    [[TMP14:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP12]], [[TMP9]]
// BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT:    store double [[TMP13]], ptr [[RETVAL_REALP]], align 8
// BASIC_FAST-NEXT:    store double [[TMP14]], ptr [[RETVAL_IMAGP]], align 8
// BASIC_FAST-NEXT:    [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// BASIC_FAST-NEXT:    ret { double, double } [[TMP15]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// FULL_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// FULL_FAST-NEXT:  entry:
// FULL_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// FULL_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// FULL_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// FULL_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { double, double } @__divdc3(double noundef nofpclass(nan inf) [[A_REAL]], double noundef nofpclass(nan inf) [[A_IMAG]], double noundef nofpclass(nan inf) [[B_REAL]], double noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT:    [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
// FULL_FAST-NEXT:    [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
// FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT:    store double [[TMP4]], ptr [[RETVAL_REALP]], align 8
// FULL_FAST-NEXT:    store double [[TMP5]], ptr [[RETVAL_IMAGP]], align 8
// FULL_FAST-NEXT:    [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// FULL_FAST-NEXT:    ret { double, double } [[TMP6]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// IMPRVD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR2:[0-9]+]] {
// IMPRVD_FAST-NEXT:  entry:
// IMPRVD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// IMPRVD_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// IMPRVD_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// IMPRVD_FAST-NEXT:    [[TMP4:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.fabs.f64(double [[B_REAL]])
// IMPRVD_FAST-NEXT:    [[TMP5:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.fabs.f64(double [[B_IMAG]])
// IMPRVD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt double [[TMP4]], [[TMP5]]
// IMPRVD_FAST-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP6:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[B_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[TMP6]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP8:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[B_REAL]], [[TMP7]]
// IMPRVD_FAST-NEXT:    [[TMP9:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[TMP6]]
// IMPRVD_FAST-NEXT:    [[TMP10:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[TMP9]]
// IMPRVD_FAST-NEXT:    [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP10]], [[TMP8]]
// IMPRVD_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[TMP6]]
// IMPRVD_FAST-NEXT:    [[TMP13:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[TMP12]]
// IMPRVD_FAST-NEXT:    [[TMP14:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP13]], [[TMP8]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP15:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[B_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP16:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[TMP15]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP17:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[B_IMAG]], [[TMP16]]
// IMPRVD_FAST-NEXT:    [[TMP18:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[TMP15]]
// IMPRVD_FAST-NEXT:    [[TMP19:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP18]], [[A_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP20:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP19]], [[TMP17]]
// IMPRVD_FAST-NEXT:    [[TMP21:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[TMP15]]
// IMPRVD_FAST-NEXT:    [[TMP22:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP21]], [[A_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP23:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP22]], [[TMP17]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD_FAST:       complex_div:
// IMPRVD_FAST-NEXT:    [[TMP24:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[TMP25:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store double [[TMP24]], ptr [[RETVAL_REALP]], align 8
// IMPRVD_FAST-NEXT:    store double [[TMP25]], ptr [[RETVAL_IMAGP]], align 8
// IMPRVD_FAST-NEXT:    [[TMP26:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// IMPRVD_FAST-NEXT:    ret { double, double } [[TMP26]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// PRMTD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// PRMTD_FAST-NEXT:  entry:
// PRMTD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext double [[A_REAL]] to x86_fp80
// PRMTD_FAST-NEXT:    [[EXT1:%.*]] = fpext double [[A_IMAG]] to x86_fp80
// PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_FAST-NEXT:    [[EXT2:%.*]] = fpext double [[B_REAL]] to x86_fp80
// PRMTD_FAST-NEXT:    [[EXT3:%.*]] = fpext double [[B_IMAG]] to x86_fp80
// PRMTD_FAST-NEXT:    [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT]], [[EXT2]]
// PRMTD_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT1]], [[EXT3]]
// PRMTD_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP4]], [[TMP5]]
// PRMTD_FAST-NEXT:    [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT2]], [[EXT2]]
// PRMTD_FAST-NEXT:    [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT3]], [[EXT3]]
// PRMTD_FAST-NEXT:    [[TMP9:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP7]], [[TMP8]]
// PRMTD_FAST-NEXT:    [[TMP10:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT1]], [[EXT2]]
// PRMTD_FAST-NEXT:    [[TMP11:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT]], [[EXT3]]
// PRMTD_FAST-NEXT:    [[TMP12:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP10]], [[TMP11]]
// PRMTD_FAST-NEXT:    [[TMP13:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP9]]
// PRMTD_FAST-NEXT:    [[TMP14:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP12]], [[TMP9]]
// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc x86_fp80 [[TMP13]] to double
// PRMTD_FAST-NEXT:    [[UNPROMOTION4:%.*]] = fptrunc x86_fp80 [[TMP14]] to double
// PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store double [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 8
// PRMTD_FAST-NEXT:    store double [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_FAST-NEXT:    [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_FAST-NEXT:    ret { double, double } [[TMP15]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local void @divd(
// X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT:  entry:
// X86WINPRMTD_STRICT-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT:       abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP2]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_REAL]], double [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A_REAL]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP6]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A_IMAG]], double [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP9]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD_STRICT:       abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT:    [[TMP11:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP11]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_IMAG]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP14:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[A_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP16:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP15]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP17:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP18:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP17]], double [[A_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP19:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP18]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV]]
// X86WINPRMTD_STRICT:       complex_div:
// X86WINPRMTD_STRICT-NEXT:    [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT:    [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD_STRICT-NEXT:    ret void
//
// PRMTD_STRICT-LABEL: define dso_local { double, double } @divd(
// PRMTD_STRICT-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2:[0-9]+]] {
// PRMTD_STRICT-NEXT:  entry:
// PRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_STRICT-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_STRICT-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_STRICT-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_STRICT-NEXT:    [[EXT:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[EXT1:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_STRICT-NEXT:    [[EXT2:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[B_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[EXT3:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[B_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT]], x86_fp80 [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT1]], x86_fp80 [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP4]], x86_fp80 [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT2]], x86_fp80 [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT3]], x86_fp80 [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP7]], x86_fp80 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT1]], x86_fp80 [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP11:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT]], x86_fp80 [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP12:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[TMP10]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP13:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP6]], x86_fp80 [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP14:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP12]], x86_fp80 [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[UNPROMOTION:%.*]] = call double @llvm.experimental.constrained.fptrunc.f64.f80(x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[UNPROMOTION4:%.*]] = call double @llvm.experimental.constrained.fptrunc.f64.f80(x86_fp80 [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store double [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 8
// PRMTD_STRICT-NEXT:    store double [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_STRICT-NEXT:    [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_STRICT-NEXT:    ret { double, double } [[TMP15]]
//
_Complex double divd(_Complex double a, _Complex double b) {
  return a / b;
}

// FULL-LABEL: define dso_local { double, double } @muld(
// FULL-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1]] {
// FULL-NEXT:  entry:
// FULL-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// FULL-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// FULL-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// FULL-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// FULL-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// FULL-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// FULL-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// FULL-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// FULL-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// FULL-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// FULL-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// FULL-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// FULL-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// FULL-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// FULL-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// FULL-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// FULL-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// FULL-NEXT:    [[ISNAN_CMP:%.*]] = fcmp uno double [[MUL_R]], [[MUL_R]]
// FULL-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
// FULL:       complex_mul_imag_nan:
// FULL-NEXT:    [[ISNAN_CMP1:%.*]] = fcmp uno double [[MUL_I]], [[MUL_I]]
// FULL-NEXT:    br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL:       complex_mul_libcall:
// FULL-NEXT:    [[CALL:%.*]] = call { double, double } @__muldc3(double noundef [[A_REAL]], double noundef [[A_IMAG]], double noundef [[B_REAL]], double noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT:    [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
// FULL-NEXT:    [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
// FULL-NEXT:    br label [[COMPLEX_MUL_CONT]]
// FULL:       complex_mul_cont:
// FULL-NEXT:    [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP4]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP5]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT:    store double [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 8
// FULL-NEXT:    store double [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 8
// FULL-NEXT:    [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// FULL-NEXT:    ret { double, double } [[TMP6]]
//
// BASIC-LABEL: define dso_local { double, double } @muld(
// BASIC-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1]] {
// BASIC-NEXT:  entry:
// BASIC-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// BASIC-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// BASIC-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// BASIC-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// BASIC-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// BASIC-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// BASIC-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// BASIC-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// BASIC-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// BASIC-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// BASIC-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT:    store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// BASIC-NEXT:    store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// BASIC-NEXT:    [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// BASIC-NEXT:    ret { double, double } [[TMP4]]
//
// IMPRVD-LABEL: define dso_local { double, double } @muld(
// IMPRVD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2]] {
// IMPRVD-NEXT:  entry:
// IMPRVD-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// IMPRVD-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// IMPRVD-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// IMPRVD-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// IMPRVD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// IMPRVD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// IMPRVD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// IMPRVD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// IMPRVD-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// IMPRVD-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// IMPRVD-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// IMPRVD-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// IMPRVD-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// IMPRVD-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// IMPRVD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT:    store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// IMPRVD-NEXT:    store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// IMPRVD-NEXT:    [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// IMPRVD-NEXT:    ret { double, double } [[TMP4]]
//
// PRMTD-LABEL: define dso_local { double, double } @muld(
// PRMTD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1]] {
// PRMTD-NEXT:  entry:
// PRMTD-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// PRMTD-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// PRMTD-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// PRMTD-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// PRMTD-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// PRMTD-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// PRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT:    store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// PRMTD-NEXT:    store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD-NEXT:    [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD-NEXT:    ret { double, double } [[TMP4]]
//
// X86WINPRMTD-LABEL: define dso_local void @muld(
// X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT:  entry:
// X86WINPRMTD-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// X86WINPRMTD-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD-NEXT:    store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD-NEXT:    ret void
//
// AVRFP32-LABEL: define dso_local { float, float } @muld(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT:  entry:
// AVRFP32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// AVRFP32-NEXT:    [[B:%.*]] = alloca { float, float }, align 4
// AVRFP32-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 4
// AVRFP32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 4
// AVRFP32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 4
// AVRFP32-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 4
// AVRFP32-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// AVRFP32-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// AVRFP32-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// AVRFP32-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// AVRFP32-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// AVRFP32-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// AVRFP32-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// AVRFP32-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// AVRFP32-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// AVRFP32-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// AVRFP32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT:    ret { float, float } [[TMP4]]
//
// AVRFP64-LABEL: define dso_local void @muld(
// AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT:  entry:
// AVRFP64-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// AVRFP64-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// AVRFP64-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// AVRFP64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// AVRFP64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// AVRFP64-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// AVRFP64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// AVRFP64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// AVRFP64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// AVRFP64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// AVRFP64-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// AVRFP64-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// AVRFP64-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// AVRFP64-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// AVRFP64-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// AVRFP64-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// AVRFP64-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 1
// AVRFP64-NEXT:    store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
// AVRFP64-NEXT:    ret void
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// BASIC_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1]] {
// BASIC_FAST-NEXT:  entry:
// BASIC_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// BASIC_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// BASIC_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// BASIC_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// BASIC_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// BASIC_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT:    store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// BASIC_FAST-NEXT:    store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// BASIC_FAST-NEXT:    [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// BASIC_FAST-NEXT:    ret { double, double } [[TMP4]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// FULL_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1]] {
// FULL_FAST-NEXT:  entry:
// FULL_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// FULL_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// FULL_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// FULL_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// FULL_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// FULL_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// FULL_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// FULL_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// FULL_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// FULL_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// FULL_FAST-NEXT:    [[ISNAN_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno double [[MUL_R]], [[MUL_R]]
// FULL_FAST-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
// FULL_FAST:       complex_mul_imag_nan:
// FULL_FAST-NEXT:    [[ISNAN_CMP1:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno double [[MUL_I]], [[MUL_I]]
// FULL_FAST-NEXT:    br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL_FAST:       complex_mul_libcall:
// FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { double, double } @__muldc3(double noundef nofpclass(nan inf) [[A_REAL]], double noundef nofpclass(nan inf) [[A_IMAG]], double noundef nofpclass(nan inf) [[B_REAL]], double noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT:    [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
// FULL_FAST-NEXT:    [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
// FULL_FAST-NEXT:    br label [[COMPLEX_MUL_CONT]]
// FULL_FAST:       complex_mul_cont:
// FULL_FAST-NEXT:    [[REAL_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP4]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP5]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT:    store double [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 8
// FULL_FAST-NEXT:    store double [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 8
// FULL_FAST-NEXT:    [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// FULL_FAST-NEXT:    ret { double, double } [[TMP6]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// IMPRVD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR2]] {
// IMPRVD_FAST-NEXT:  entry:
// IMPRVD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// IMPRVD_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// IMPRVD_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// IMPRVD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// IMPRVD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// IMPRVD_FAST-NEXT:    store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// IMPRVD_FAST-NEXT:    [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// IMPRVD_FAST-NEXT:    ret { double, double } [[TMP4]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// PRMTD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1]] {
// PRMTD_FAST-NEXT:  entry:
// PRMTD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_FAST-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_FAST-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_FAST-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// PRMTD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// PRMTD_FAST-NEXT:    store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_FAST-NEXT:    [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_FAST-NEXT:    ret { double, double } [[TMP4]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local void @muld(
// X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT:  entry:
// X86WINPRMTD_STRICT-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD_STRICT-NEXT:    ret void
//
// PRMTD_STRICT-LABEL: define dso_local { double, double } @muld(
// PRMTD_STRICT-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2]] {
// PRMTD_STRICT-NEXT:  entry:
// PRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT:    [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_STRICT-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_STRICT-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_STRICT-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_STRICT-NEXT:    [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// PRMTD_STRICT-NEXT:    store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_STRICT-NEXT:    [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_STRICT-NEXT:    ret { double, double } [[TMP4]]
//
_Complex double muld(_Complex double a, _Complex double b) {
  return a * b;
}

// FULL-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// FULL-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// FULL-NEXT:  entry:
// FULL-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// FULL-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// FULL-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// FULL-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL-NEXT:    [[CALL:%.*]] = call { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef [[A_REAL]], x86_fp80 noundef [[A_IMAG]], x86_fp80 noundef [[B_REAL]], x86_fp80 noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT:    store x86_fp80 [[TMP0]], ptr [[RETVAL_REALP]], align 16
// FULL-NEXT:    store x86_fp80 [[TMP1]], ptr [[RETVAL_IMAGP]], align 16
// FULL-NEXT:    [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// FULL-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP2]]
//
// BASIC-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// BASIC-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// BASIC-NEXT:  entry:
// BASIC-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// BASIC-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// BASIC-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// BASIC-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC-NEXT:    [[TMP0:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[TMP1:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP2:%.*]] = fadd x86_fp80 [[TMP0]], [[TMP1]]
// BASIC-NEXT:    [[TMP3:%.*]] = fmul x86_fp80 [[B_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[TMP4:%.*]] = fmul x86_fp80 [[B_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP5:%.*]] = fadd x86_fp80 [[TMP3]], [[TMP4]]
// BASIC-NEXT:    [[TMP6:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT:    [[TMP7:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT:    [[TMP8:%.*]] = fsub x86_fp80 [[TMP6]], [[TMP7]]
// BASIC-NEXT:    [[TMP9:%.*]] = fdiv x86_fp80 [[TMP2]], [[TMP5]]
// BASIC-NEXT:    [[TMP10:%.*]] = fdiv x86_fp80 [[TMP8]], [[TMP5]]
// BASIC-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT:    store x86_fp80 [[TMP9]], ptr [[RETVAL_REALP]], align 16
// BASIC-NEXT:    store x86_fp80 [[TMP10]], ptr [[RETVAL_IMAGP]], align 16
// BASIC-NEXT:    [[TMP11:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// BASIC-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP11]]
//
// IMPRVD-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// IMPRVD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// IMPRVD-NEXT:  entry:
// IMPRVD-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// IMPRVD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// IMPRVD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// IMPRVD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD-NEXT:    [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
// IMPRVD-NEXT:    [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// IMPRVD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD-NEXT:    [[TMP2:%.*]] = fdiv x86_fp80 [[B_IMAG]], [[B_REAL]]
// IMPRVD-NEXT:    [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[B_IMAG]]
// IMPRVD-NEXT:    [[TMP4:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP3]]
// IMPRVD-NEXT:    [[TMP5:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP2]]
// IMPRVD-NEXT:    [[TMP6:%.*]] = fadd x86_fp80 [[A_REAL]], [[TMP5]]
// IMPRVD-NEXT:    [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD-NEXT:    [[TMP8:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP2]]
// IMPRVD-NEXT:    [[TMP9:%.*]] = fsub x86_fp80 [[A_IMAG]], [[TMP8]]
// IMPRVD-NEXT:    [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD-NEXT:    [[TMP11:%.*]] = fdiv x86_fp80 [[B_REAL]], [[B_IMAG]]
// IMPRVD-NEXT:    [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[B_REAL]]
// IMPRVD-NEXT:    [[TMP13:%.*]] = fadd x86_fp80 [[B_IMAG]], [[TMP12]]
// IMPRVD-NEXT:    [[TMP14:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP11]]
// IMPRVD-NEXT:    [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[A_IMAG]]
// IMPRVD-NEXT:    [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD-NEXT:    [[TMP17:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP11]]
// IMPRVD-NEXT:    [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[A_REAL]]
// IMPRVD-NEXT:    [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD:       complex_div:
// IMPRVD-NEXT:    [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT:    store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// IMPRVD-NEXT:    store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// IMPRVD-NEXT:    [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// IMPRVD-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP22]]
//
// PRMTD-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// PRMTD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// PRMTD-NEXT:  entry:
// PRMTD-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD-NEXT:    [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
// PRMTD-NEXT:    [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// PRMTD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// PRMTD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD:       abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD-NEXT:    [[TMP2:%.*]] = fdiv x86_fp80 [[B_IMAG]], [[B_REAL]]
// PRMTD-NEXT:    [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[B_IMAG]]
// PRMTD-NEXT:    [[TMP4:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP3]]
// PRMTD-NEXT:    [[TMP5:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP2]]
// PRMTD-NEXT:    [[TMP6:%.*]] = fadd x86_fp80 [[A_REAL]], [[TMP5]]
// PRMTD-NEXT:    [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// PRMTD-NEXT:    [[TMP8:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP2]]
// PRMTD-NEXT:    [[TMP9:%.*]] = fsub x86_fp80 [[A_IMAG]], [[TMP8]]
// PRMTD-NEXT:    [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// PRMTD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// PRMTD:       abs_rhsr_less_than_abs_rhsi:
// PRMTD-NEXT:    [[TMP11:%.*]] = fdiv x86_fp80 [[B_REAL]], [[B_IMAG]]
// PRMTD-NEXT:    [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[B_REAL]]
// PRMTD-NEXT:    [[TMP13:%.*]] = fadd x86_fp80 [[B_IMAG]], [[TMP12]]
// PRMTD-NEXT:    [[TMP14:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP11]]
// PRMTD-NEXT:    [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[A_IMAG]]
// PRMTD-NEXT:    [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// PRMTD-NEXT:    [[TMP17:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP11]]
// PRMTD-NEXT:    [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[A_REAL]]
// PRMTD-NEXT:    [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// PRMTD-NEXT:    br label [[COMPLEX_DIV]]
// PRMTD:       complex_div:
// PRMTD-NEXT:    [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT:    [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT:    store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// PRMTD-NEXT:    store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD-NEXT:    [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP22]]
//
// X86WINPRMTD-LABEL: define dso_local void @divld(
// X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT:  entry:
// X86WINPRMTD-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
// X86WINPRMTD-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
// X86WINPRMTD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt double [[TMP0]], [[TMP1]]
// X86WINPRMTD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD:       abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD-NEXT:    [[TMP2:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[TMP3:%.*]] = fmul double [[TMP2]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[TMP4:%.*]] = fadd double [[B_REAL]], [[TMP3]]
// X86WINPRMTD-NEXT:    [[TMP5:%.*]] = fmul double [[A_IMAG]], [[TMP2]]
// X86WINPRMTD-NEXT:    [[TMP6:%.*]] = fadd double [[A_REAL]], [[TMP5]]
// X86WINPRMTD-NEXT:    [[TMP7:%.*]] = fdiv double [[TMP6]], [[TMP4]]
// X86WINPRMTD-NEXT:    [[TMP8:%.*]] = fmul double [[A_REAL]], [[TMP2]]
// X86WINPRMTD-NEXT:    [[TMP9:%.*]] = fsub double [[A_IMAG]], [[TMP8]]
// X86WINPRMTD-NEXT:    [[TMP10:%.*]] = fdiv double [[TMP9]], [[TMP4]]
// X86WINPRMTD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD:       abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD-NEXT:    [[TMP11:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[TMP12:%.*]] = fmul double [[TMP11]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[TMP13:%.*]] = fadd double [[B_IMAG]], [[TMP12]]
// X86WINPRMTD-NEXT:    [[TMP14:%.*]] = fmul double [[A_REAL]], [[TMP11]]
// X86WINPRMTD-NEXT:    [[TMP15:%.*]] = fadd double [[TMP14]], [[A_IMAG]]
// X86WINPRMTD-NEXT:    [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP13]]
// X86WINPRMTD-NEXT:    [[TMP17:%.*]] = fmul double [[A_IMAG]], [[TMP11]]
// X86WINPRMTD-NEXT:    [[TMP18:%.*]] = fsub double [[TMP17]], [[A_REAL]]
// X86WINPRMTD-NEXT:    [[TMP19:%.*]] = fdiv double [[TMP18]], [[TMP13]]
// X86WINPRMTD-NEXT:    br label [[COMPLEX_DIV]]
// X86WINPRMTD:       complex_div:
// X86WINPRMTD-NEXT:    [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT:    [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD-NEXT:    store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD-NEXT:    ret void
//
// AVRFP32-LABEL: define dso_local { float, float } @divld(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT:  entry:
// AVRFP32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP4:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_REAL]])
// AVRFP32-NEXT:    [[TMP5:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_IMAG]])
// AVRFP32-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt float [[TMP4]], [[TMP5]]
// AVRFP32-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP32:       abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP32-NEXT:    [[TMP6:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
// AVRFP32-NEXT:    [[TMP7:%.*]] = fmul float [[TMP6]], [[B_IMAG]]
// AVRFP32-NEXT:    [[TMP8:%.*]] = fadd float [[B_REAL]], [[TMP7]]
// AVRFP32-NEXT:    [[TMP9:%.*]] = fmul float [[A_IMAG]], [[TMP6]]
// AVRFP32-NEXT:    [[TMP10:%.*]] = fadd float [[A_REAL]], [[TMP9]]
// AVRFP32-NEXT:    [[TMP11:%.*]] = fdiv float [[TMP10]], [[TMP8]]
// AVRFP32-NEXT:    [[TMP12:%.*]] = fmul float [[A_REAL]], [[TMP6]]
// AVRFP32-NEXT:    [[TMP13:%.*]] = fsub float [[A_IMAG]], [[TMP12]]
// AVRFP32-NEXT:    [[TMP14:%.*]] = fdiv float [[TMP13]], [[TMP8]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV:%.*]]
// AVRFP32:       abs_rhsr_less_than_abs_rhsi:
// AVRFP32-NEXT:    [[TMP15:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
// AVRFP32-NEXT:    [[TMP16:%.*]] = fmul float [[TMP15]], [[B_REAL]]
// AVRFP32-NEXT:    [[TMP17:%.*]] = fadd float [[B_IMAG]], [[TMP16]]
// AVRFP32-NEXT:    [[TMP18:%.*]] = fmul float [[A_REAL]], [[TMP15]]
// AVRFP32-NEXT:    [[TMP19:%.*]] = fadd float [[TMP18]], [[A_IMAG]]
// AVRFP32-NEXT:    [[TMP20:%.*]] = fdiv float [[TMP19]], [[TMP17]]
// AVRFP32-NEXT:    [[TMP21:%.*]] = fmul float [[A_IMAG]], [[TMP15]]
// AVRFP32-NEXT:    [[TMP22:%.*]] = fsub float [[TMP21]], [[A_REAL]]
// AVRFP32-NEXT:    [[TMP23:%.*]] = fdiv float [[TMP22]], [[TMP17]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV]]
// AVRFP32:       complex_div:
// AVRFP32-NEXT:    [[TMP24:%.*]] = phi float [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[TMP25:%.*]] = phi float [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[TMP24]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT:    store float [[TMP25]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP26:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT:    ret { float, float } [[TMP26]]
//
// AVRFP64-LABEL: define dso_local void @divld(
// AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT:  entry:
// AVRFP64-NEXT:    [[A:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT:    [[B:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 1
// AVRFP64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
// AVRFP64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT:    [[TMP4:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_REAL]])
// AVRFP64-NEXT:    [[TMP5:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_IMAG]])
// AVRFP64-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt double [[TMP4]], [[TMP5]]
// AVRFP64-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP64:       abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP64-NEXT:    [[TMP6:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// AVRFP64-NEXT:    [[TMP7:%.*]] = fmul double [[TMP6]], [[B_IMAG]]
// AVRFP64-NEXT:    [[TMP8:%.*]] = fadd double [[B_REAL]], [[TMP7]]
// AVRFP64-NEXT:    [[TMP9:%.*]] = fmul double [[A_IMAG]], [[TMP6]]
// AVRFP64-NEXT:    [[TMP10:%.*]] = fadd double [[A_REAL]], [[TMP9]]
// AVRFP64-NEXT:    [[TMP11:%.*]] = fdiv double [[TMP10]], [[TMP8]]
// AVRFP64-NEXT:    [[TMP12:%.*]] = fmul double [[A_REAL]], [[TMP6]]
// AVRFP64-NEXT:    [[TMP13:%.*]] = fsub double [[A_IMAG]], [[TMP12]]
// AVRFP64-NEXT:    [[TMP14:%.*]] = fdiv double [[TMP13]], [[TMP8]]
// AVRFP64-NEXT:    br label [[COMPLEX_DIV:%.*]]
// AVRFP64:       abs_rhsr_less_than_abs_rhsi:
// AVRFP64-NEXT:    [[TMP15:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// AVRFP64-NEXT:    [[TMP16:%.*]] = fmul double [[TMP15]], [[B_REAL]]
// AVRFP64-NEXT:    [[TMP17:%.*]] = fadd double [[B_IMAG]], [[TMP16]]
// AVRFP64-NEXT:    [[TMP18:%.*]] = fmul double [[A_REAL]], [[TMP15]]
// AVRFP64-NEXT:    [[TMP19:%.*]] = fadd double [[TMP18]], [[A_IMAG]]
// AVRFP64-NEXT:    [[TMP20:%.*]] = fdiv double [[TMP19]], [[TMP17]]
// AVRFP64-NEXT:    [[TMP21:%.*]] = fmul double [[A_IMAG]], [[TMP15]]
// AVRFP64-NEXT:    [[TMP22:%.*]] = fsub double [[TMP21]], [[A_REAL]]
// AVRFP64-NEXT:    [[TMP23:%.*]] = fdiv double [[TMP22]], [[TMP17]]
// AVRFP64-NEXT:    br label [[COMPLEX_DIV]]
// AVRFP64:       complex_div:
// AVRFP64-NEXT:    [[TMP24:%.*]] = phi double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT:    [[TMP25:%.*]] = phi double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[TMP24]], ptr [[AGG_RESULT_REALP]], align 1
// AVRFP64-NEXT:    store double [[TMP25]], ptr [[AGG_RESULT_IMAGP]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
// AVRFP64-NEXT:    ret void
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
// BASIC_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// BASIC_FAST-NEXT:  entry:
// BASIC_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]], [[TMP1]]
// BASIC_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP3]], [[TMP4]]
// BASIC_FAST-NEXT:    [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP7]]
// BASIC_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[TMP5]]
// BASIC_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP8]], [[TMP5]]
// BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT:    store x86_fp80 [[TMP9]], ptr [[RETVAL_REALP]], align 16
// BASIC_FAST-NEXT:    store x86_fp80 [[TMP10]], ptr [[RETVAL_IMAGP]], align 16
// BASIC_FAST-NEXT:    [[TMP11:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// BASIC_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP11]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
// FULL_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// FULL_FAST-NEXT:  entry:
// FULL_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef nofpclass(nan inf) [[A_REAL]], x86_fp80 noundef nofpclass(nan inf) [[A_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL_FAST-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT:    store x86_fp80 [[TMP0]], ptr [[RETVAL_REALP]], align 16
// FULL_FAST-NEXT:    store x86_fp80 [[TMP1]], ptr [[RETVAL_IMAGP]], align 16
// FULL_FAST-NEXT:    [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// FULL_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP2]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
// IMPRVD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// IMPRVD_FAST-NEXT:  entry:
// IMPRVD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
// IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// IMPRVD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD_FAST-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP3]]
// IMPRVD_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP2]]
// IMPRVD_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP5]]
// IMPRVD_FAST-NEXT:    [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD_FAST-NEXT:    [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP2]]
// IMPRVD_FAST-NEXT:    [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP8]]
// IMPRVD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP12]]
// IMPRVD_FAST-NEXT:    [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP11]]
// IMPRVD_FAST-NEXT:    [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[A_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD_FAST-NEXT:    [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP11]]
// IMPRVD_FAST-NEXT:    [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[A_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD_FAST:       complex_div:
// IMPRVD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// IMPRVD_FAST-NEXT:    store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// IMPRVD_FAST-NEXT:    [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// IMPRVD_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP22]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
// PRMTD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// PRMTD_FAST-NEXT:  entry:
// PRMTD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
// PRMTD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// PRMTD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
// PRMTD_FAST-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_FAST:       abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD_FAST-NEXT:    [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP3]]
// PRMTD_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP2]]
// PRMTD_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP5]]
// PRMTD_FAST-NEXT:    [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
// PRMTD_FAST-NEXT:    [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP2]]
// PRMTD_FAST-NEXT:    [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP8]]
// PRMTD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
// PRMTD_FAST-NEXT:    br label [[COMPLEX_DIV:%.*]]
// PRMTD_FAST:       abs_rhsr_less_than_abs_rhsi:
// PRMTD_FAST-NEXT:    [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP12]]
// PRMTD_FAST-NEXT:    [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP11]]
// PRMTD_FAST-NEXT:    [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[A_IMAG]]
// PRMTD_FAST-NEXT:    [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
// PRMTD_FAST-NEXT:    [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP11]]
// PRMTD_FAST-NEXT:    [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[A_REAL]]
// PRMTD_FAST-NEXT:    [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
// PRMTD_FAST-NEXT:    br label [[COMPLEX_DIV]]
// PRMTD_FAST:       complex_div:
// PRMTD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// PRMTD_FAST-NEXT:    store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD_FAST-NEXT:    [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP22]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local void @divld(
// X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT:  entry:
// X86WINPRMTD_STRICT-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT:       abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP2]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_REAL]], double [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A_REAL]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP6]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A_IMAG]], double [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP9]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD_STRICT:       abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT:    [[TMP11:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP11]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_IMAG]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP14:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[A_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP16:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP15]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP17:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP18:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP17]], double [[A_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP19:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP18]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV]]
// X86WINPRMTD_STRICT:       complex_div:
// X86WINPRMTD_STRICT-NEXT:    [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT:    [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD_STRICT-NEXT:    ret void
//
// PRMTD_STRICT-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// PRMTD_STRICT-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// PRMTD_STRICT-NEXT:  entry:
// PRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_STRICT-NEXT:    [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]]) #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]]) #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_STRICT:       abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD_STRICT-NEXT:    [[TMP2:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP3:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP2]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[A_REAL]], x86_fp80 [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP6]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP9]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV:%.*]]
// PRMTD_STRICT:       abs_rhsr_less_than_abs_rhsi:
// PRMTD_STRICT-NEXT:    [[TMP11:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[B_REAL]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP12:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP11]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP13:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP14:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP15:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP14]], x86_fp80 [[A_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP16:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP15]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP17:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP18:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[TMP17]], x86_fp80 [[A_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP19:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP18]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV]]
// PRMTD_STRICT:       complex_div:
// PRMTD_STRICT-NEXT:    [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_STRICT-NEXT:    [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// PRMTD_STRICT-NEXT:    store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD_STRICT-NEXT:    [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD_STRICT-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP22]]
//
_Complex long double divld(_Complex long double a, _Complex long double b) {
  return a / b;
}

// FULL-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
// FULL-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// FULL-NEXT:  entry:
// FULL-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// FULL-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// FULL-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// FULL-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL-NEXT:    [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
// FULL-NEXT:    [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
// FULL-NEXT:    [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
// FULL-NEXT:    [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
// FULL-NEXT:    [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
// FULL-NEXT:    [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
// FULL-NEXT:    [[ISNAN_CMP:%.*]] = fcmp uno x86_fp80 [[MUL_R]], [[MUL_R]]
// FULL-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
// FULL:       complex_mul_imag_nan:
// FULL-NEXT:    [[ISNAN_CMP1:%.*]] = fcmp uno x86_fp80 [[MUL_I]], [[MUL_I]]
// FULL-NEXT:    br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL:       complex_mul_libcall:
// FULL-NEXT:    [[CALL:%.*]] = call { x86_fp80, x86_fp80 } @__mulxc3(x86_fp80 noundef [[A_REAL]], x86_fp80 noundef [[A_IMAG]], x86_fp80 noundef [[B_REAL]], x86_fp80 noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL-NEXT:    br label [[COMPLEX_MUL_CONT]]
// FULL:       complex_mul_cont:
// FULL-NEXT:    [[REAL_MUL_PHI:%.*]] = phi x86_fp80 [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi x86_fp80 [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT:    store x86_fp80 [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 16
// FULL-NEXT:    store x86_fp80 [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 16
// FULL-NEXT:    [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// FULL-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP2]]
//
// BASIC-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
// BASIC-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// BASIC-NEXT:  entry:
// BASIC-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// BASIC-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// BASIC-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// BASIC-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC-NEXT:    [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
// BASIC-NEXT:    [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT:    [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT:    [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT:    [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
// BASIC-NEXT:    [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
// BASIC-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT:    store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// BASIC-NEXT:    store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// BASIC-NEXT:    [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// BASIC-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP0]]
//
// IMPRVD-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
// IMPRVD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// IMPRVD-NEXT:  entry:
// IMPRVD-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// IMPRVD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// IMPRVD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// IMPRVD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD-NEXT:    [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
// IMPRVD-NEXT:    [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
// IMPRVD-NEXT:    [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
// IMPRVD-NEXT:    [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
// IMPRVD-NEXT:    [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
// IMPRVD-NEXT:    [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
// IMPRVD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT:    store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// IMPRVD-NEXT:    store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// IMPRVD-NEXT:    [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// IMPRVD-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP0]]
//
// PRMTD-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
// PRMTD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// PRMTD-NEXT:  entry:
// PRMTD-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD-NEXT:    [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
// PRMTD-NEXT:    [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
// PRMTD-NEXT:    [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
// PRMTD-NEXT:    [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
// PRMTD-NEXT:    [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
// PRMTD-NEXT:    [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
// PRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT:    store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// PRMTD-NEXT:    store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD-NEXT:    [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP0]]
//
// X86WINPRMTD-LABEL: define dso_local void @mulld(
// X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT:  entry:
// X86WINPRMTD-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// X86WINPRMTD-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD-NEXT:    store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD-NEXT:    ret void
//
// AVRFP32-LABEL: define dso_local { float, float } @mulld(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT:  entry:
// AVRFP32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT:    [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// AVRFP32-NEXT:    [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// AVRFP32-NEXT:    [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// AVRFP32-NEXT:    [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// AVRFP32-NEXT:    [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// AVRFP32-NEXT:    [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// AVRFP32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT:    store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT:    ret { float, float } [[TMP4]]
//
// AVRFP64-LABEL: define dso_local void @mulld(
// AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT:  entry:
// AVRFP64-NEXT:    [[A:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT:    [[B:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 1
// AVRFP64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
// AVRFP64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT:    [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// AVRFP64-NEXT:    [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// AVRFP64-NEXT:    [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// AVRFP64-NEXT:    [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// AVRFP64-NEXT:    [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// AVRFP64-NEXT:    [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// AVRFP64-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 1
// AVRFP64-NEXT:    store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
// AVRFP64-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
// AVRFP64-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
// AVRFP64-NEXT:    ret void
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
// BASIC_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// BASIC_FAST-NEXT:  entry:
// BASIC_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
// BASIC_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
// BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT:    store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// BASIC_FAST-NEXT:    store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// BASIC_FAST-NEXT:    [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// BASIC_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP0]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
// FULL_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// FULL_FAST-NEXT:  entry:
// FULL_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
// FULL_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
// FULL_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
// FULL_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
// FULL_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
// FULL_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
// FULL_FAST-NEXT:    [[ISNAN_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno x86_fp80 [[MUL_R]], [[MUL_R]]
// FULL_FAST-NEXT:    br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
// FULL_FAST:       complex_mul_imag_nan:
// FULL_FAST-NEXT:    [[ISNAN_CMP1:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno x86_fp80 [[MUL_I]], [[MUL_I]]
// FULL_FAST-NEXT:    br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL_FAST:       complex_mul_libcall:
// FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__mulxc3(x86_fp80 noundef nofpclass(nan inf) [[A_REAL]], x86_fp80 noundef nofpclass(nan inf) [[A_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL_FAST-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL_FAST-NEXT:    br label [[COMPLEX_MUL_CONT]]
// FULL_FAST:       complex_mul_cont:
// FULL_FAST-NEXT:    [[REAL_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT:    [[IMAG_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT:    store x86_fp80 [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 16
// FULL_FAST-NEXT:    store x86_fp80 [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 16
// FULL_FAST-NEXT:    [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// FULL_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP2]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
// IMPRVD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// IMPRVD_FAST-NEXT:  entry:
// IMPRVD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
// IMPRVD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
// IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// IMPRVD_FAST-NEXT:    store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// IMPRVD_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP0]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
// PRMTD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// PRMTD_FAST-NEXT:  entry:
// PRMTD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_FAST-NEXT:    [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
// PRMTD_FAST-NEXT:    [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
// PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// PRMTD_FAST-NEXT:    store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD_FAST-NEXT:    [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD_FAST-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP0]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local void @mulld(
// X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT:  entry:
// X86WINPRMTD_STRICT-NEXT:    [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD_STRICT-NEXT:    store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD_STRICT-NEXT:    ret void
//
// PRMTD_STRICT-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
// PRMTD_STRICT-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// PRMTD_STRICT-NEXT:  entry:
// PRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_STRICT-NEXT:    [[MUL_AC:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_BD:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_AD:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_BC:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_R:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[MUL_AC]], x86_fp80 [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[MUL_I:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[MUL_AD]], x86_fp80 [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// PRMTD_STRICT-NEXT:    store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD_STRICT-NEXT:    [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD_STRICT-NEXT:    ret { x86_fp80, x86_fp80 } [[TMP0]]
//
_Complex long double mulld(_Complex long double a, _Complex long double b) {
  return a * b;
}

// FULL-LABEL: define dso_local <2 x float> @f1(
// FULL-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// FULL-NEXT:  entry:
// FULL-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// FULL-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// FULL-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// FULL-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// FULL-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// FULL-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// FULL-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// FULL-NEXT:    [[CALL:%.*]] = call { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef [[B_REAL]], x86_fp80 noundef [[B_IMAG]], x86_fp80 noundef [[CONV]], x86_fp80 noundef [[CONV1]]) #[[ATTR2]]
// FULL-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP0]] to float
// FULL-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP1]] to float
// FULL-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL-NEXT:    [[CALL4:%.*]] = call <2 x float> @__divsc3(float noundef [[CONV2]], float noundef [[CONV3]], float noundef [[A_REAL]], float noundef [[A_IMAG]]) #[[ATTR2]]
// FULL-NEXT:    store <2 x float> [[CALL4]], ptr [[COERCE]], align 4
// FULL-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT:    store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL-NEXT:    store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL-NEXT:    ret <2 x float> [[TMP2]]
//
// BASIC-LABEL: define dso_local <2 x float> @f1(
// BASIC-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// BASIC-NEXT:  entry:
// BASIC-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// BASIC-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// BASIC-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// BASIC-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// BASIC-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// BASIC-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// BASIC-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// BASIC-NEXT:    [[TMP0:%.*]] = fmul x86_fp80 [[B_REAL]], [[CONV]]
// BASIC-NEXT:    [[TMP1:%.*]] = fmul x86_fp80 [[B_IMAG]], [[CONV1]]
// BASIC-NEXT:    [[TMP2:%.*]] = fadd x86_fp80 [[TMP0]], [[TMP1]]
// BASIC-NEXT:    [[TMP3:%.*]] = fmul x86_fp80 [[CONV]], [[CONV]]
// BASIC-NEXT:    [[TMP4:%.*]] = fmul x86_fp80 [[CONV1]], [[CONV1]]
// BASIC-NEXT:    [[TMP5:%.*]] = fadd x86_fp80 [[TMP3]], [[TMP4]]
// BASIC-NEXT:    [[TMP6:%.*]] = fmul x86_fp80 [[B_IMAG]], [[CONV]]
// BASIC-NEXT:    [[TMP7:%.*]] = fmul x86_fp80 [[B_REAL]], [[CONV1]]
// BASIC-NEXT:    [[TMP8:%.*]] = fsub x86_fp80 [[TMP6]], [[TMP7]]
// BASIC-NEXT:    [[TMP9:%.*]] = fdiv x86_fp80 [[TMP2]], [[TMP5]]
// BASIC-NEXT:    [[TMP10:%.*]] = fdiv x86_fp80 [[TMP8]], [[TMP5]]
// BASIC-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP9]] to float
// BASIC-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP10]] to float
// BASIC-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC-NEXT:    [[TMP11:%.*]] = fmul float [[CONV2]], [[A_REAL]]
// BASIC-NEXT:    [[TMP12:%.*]] = fmul float [[CONV3]], [[A_IMAG]]
// BASIC-NEXT:    [[TMP13:%.*]] = fadd float [[TMP11]], [[TMP12]]
// BASIC-NEXT:    [[TMP14:%.*]] = fmul float [[A_REAL]], [[A_REAL]]
// BASIC-NEXT:    [[TMP15:%.*]] = fmul float [[A_IMAG]], [[A_IMAG]]
// BASIC-NEXT:    [[TMP16:%.*]] = fadd float [[TMP14]], [[TMP15]]
// BASIC-NEXT:    [[TMP17:%.*]] = fmul float [[CONV3]], [[A_REAL]]
// BASIC-NEXT:    [[TMP18:%.*]] = fmul float [[CONV2]], [[A_IMAG]]
// BASIC-NEXT:    [[TMP19:%.*]] = fsub float [[TMP17]], [[TMP18]]
// BASIC-NEXT:    [[TMP20:%.*]] = fdiv float [[TMP13]], [[TMP16]]
// BASIC-NEXT:    [[TMP21:%.*]] = fdiv float [[TMP19]], [[TMP16]]
// BASIC-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT:    store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// BASIC-NEXT:    store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// BASIC-NEXT:    [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC-NEXT:    ret <2 x float> [[TMP22]]
//
// IMPRVD-LABEL: define dso_local <2 x float> @f1(
// IMPRVD-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// IMPRVD-NEXT:  entry:
// IMPRVD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// IMPRVD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// IMPRVD-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// IMPRVD-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// IMPRVD-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// IMPRVD-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// IMPRVD-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// IMPRVD-NEXT:    [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// IMPRVD-NEXT:    [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// IMPRVD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD-NEXT:    [[TMP2:%.*]] = fdiv x86_fp80 [[CONV1]], [[CONV]]
// IMPRVD-NEXT:    [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[CONV1]]
// IMPRVD-NEXT:    [[TMP4:%.*]] = fadd x86_fp80 [[CONV]], [[TMP3]]
// IMPRVD-NEXT:    [[TMP5:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP2]]
// IMPRVD-NEXT:    [[TMP6:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP5]]
// IMPRVD-NEXT:    [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD-NEXT:    [[TMP8:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP2]]
// IMPRVD-NEXT:    [[TMP9:%.*]] = fsub x86_fp80 [[B_IMAG]], [[TMP8]]
// IMPRVD-NEXT:    [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD-NEXT:    [[TMP11:%.*]] = fdiv x86_fp80 [[CONV]], [[CONV1]]
// IMPRVD-NEXT:    [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[CONV]]
// IMPRVD-NEXT:    [[TMP13:%.*]] = fadd x86_fp80 [[CONV1]], [[TMP12]]
// IMPRVD-NEXT:    [[TMP14:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP11]]
// IMPRVD-NEXT:    [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[B_IMAG]]
// IMPRVD-NEXT:    [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD-NEXT:    [[TMP17:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP11]]
// IMPRVD-NEXT:    [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[B_REAL]]
// IMPRVD-NEXT:    [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD:       complex_div:
// IMPRVD-NEXT:    [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// IMPRVD-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// IMPRVD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD-NEXT:    [[TMP22:%.*]] = call float @llvm.fabs.f32(float [[A_REAL]])
// IMPRVD-NEXT:    [[TMP23:%.*]] = call float @llvm.fabs.f32(float [[A_IMAG]])
// IMPRVD-NEXT:    [[ABS_CMP4:%.*]] = fcmp ugt float [[TMP22]], [[TMP23]]
// IMPRVD-NEXT:    br i1 [[ABS_CMP4]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI6:%.*]]
// IMPRVD:       abs_rhsr_greater_or_equal_abs_rhsi5:
// IMPRVD-NEXT:    [[TMP24:%.*]] = fdiv float [[A_IMAG]], [[A_REAL]]
// IMPRVD-NEXT:    [[TMP25:%.*]] = fmul float [[TMP24]], [[A_IMAG]]
// IMPRVD-NEXT:    [[TMP26:%.*]] = fadd float [[A_REAL]], [[TMP25]]
// IMPRVD-NEXT:    [[TMP27:%.*]] = fmul float [[CONV3]], [[TMP24]]
// IMPRVD-NEXT:    [[TMP28:%.*]] = fadd float [[CONV2]], [[TMP27]]
// IMPRVD-NEXT:    [[TMP29:%.*]] = fdiv float [[TMP28]], [[TMP26]]
// IMPRVD-NEXT:    [[TMP30:%.*]] = fmul float [[CONV2]], [[TMP24]]
// IMPRVD-NEXT:    [[TMP31:%.*]] = fsub float [[CONV3]], [[TMP30]]
// IMPRVD-NEXT:    [[TMP32:%.*]] = fdiv float [[TMP31]], [[TMP26]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV7:%.*]]
// IMPRVD:       abs_rhsr_less_than_abs_rhsi6:
// IMPRVD-NEXT:    [[TMP33:%.*]] = fdiv float [[A_REAL]], [[A_IMAG]]
// IMPRVD-NEXT:    [[TMP34:%.*]] = fmul float [[TMP33]], [[A_REAL]]
// IMPRVD-NEXT:    [[TMP35:%.*]] = fadd float [[A_IMAG]], [[TMP34]]
// IMPRVD-NEXT:    [[TMP36:%.*]] = fmul float [[CONV2]], [[TMP33]]
// IMPRVD-NEXT:    [[TMP37:%.*]] = fadd float [[TMP36]], [[CONV3]]
// IMPRVD-NEXT:    [[TMP38:%.*]] = fdiv float [[TMP37]], [[TMP35]]
// IMPRVD-NEXT:    [[TMP39:%.*]] = fmul float [[CONV3]], [[TMP33]]
// IMPRVD-NEXT:    [[TMP40:%.*]] = fsub float [[TMP39]], [[CONV2]]
// IMPRVD-NEXT:    [[TMP41:%.*]] = fdiv float [[TMP40]], [[TMP35]]
// IMPRVD-NEXT:    br label [[COMPLEX_DIV7]]
// IMPRVD:       complex_div7:
// IMPRVD-NEXT:    [[TMP42:%.*]] = phi float [ [[TMP29]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP38]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
// IMPRVD-NEXT:    [[TMP43:%.*]] = phi float [ [[TMP32]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP41]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
// IMPRVD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT:    store float [[TMP42]], ptr [[RETVAL_REALP]], align 4
// IMPRVD-NEXT:    store float [[TMP43]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD-NEXT:    [[TMP44:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD-NEXT:    ret <2 x float> [[TMP44]]
//
// PRMTD-LABEL: define dso_local <2 x float> @f1(
// PRMTD-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD-NEXT:  entry:
// PRMTD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// PRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// PRMTD-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// PRMTD-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// PRMTD-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// PRMTD-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// PRMTD-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// PRMTD-NEXT:    [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// PRMTD-NEXT:    [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// PRMTD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// PRMTD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD:       abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD-NEXT:    [[TMP2:%.*]] = fdiv x86_fp80 [[CONV1]], [[CONV]]
// PRMTD-NEXT:    [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[CONV1]]
// PRMTD-NEXT:    [[TMP4:%.*]] = fadd x86_fp80 [[CONV]], [[TMP3]]
// PRMTD-NEXT:    [[TMP5:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP2]]
// PRMTD-NEXT:    [[TMP6:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP5]]
// PRMTD-NEXT:    [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// PRMTD-NEXT:    [[TMP8:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP2]]
// PRMTD-NEXT:    [[TMP9:%.*]] = fsub x86_fp80 [[B_IMAG]], [[TMP8]]
// PRMTD-NEXT:    [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// PRMTD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// PRMTD:       abs_rhsr_less_than_abs_rhsi:
// PRMTD-NEXT:    [[TMP11:%.*]] = fdiv x86_fp80 [[CONV]], [[CONV1]]
// PRMTD-NEXT:    [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[CONV]]
// PRMTD-NEXT:    [[TMP13:%.*]] = fadd x86_fp80 [[CONV1]], [[TMP12]]
// PRMTD-NEXT:    [[TMP14:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP11]]
// PRMTD-NEXT:    [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[B_IMAG]]
// PRMTD-NEXT:    [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// PRMTD-NEXT:    [[TMP17:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP11]]
// PRMTD-NEXT:    [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[B_REAL]]
// PRMTD-NEXT:    [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// PRMTD-NEXT:    br label [[COMPLEX_DIV]]
// PRMTD:       complex_div:
// PRMTD-NEXT:    [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT:    [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// PRMTD-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// PRMTD-NEXT:    [[EXT:%.*]] = fpext float [[CONV2]] to double
// PRMTD-NEXT:    [[EXT4:%.*]] = fpext float [[CONV3]] to double
// PRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD-NEXT:    [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// PRMTD-NEXT:    [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD-NEXT:    [[TMP22:%.*]] = fmul double [[EXT]], [[EXT5]]
// PRMTD-NEXT:    [[TMP23:%.*]] = fmul double [[EXT4]], [[EXT6]]
// PRMTD-NEXT:    [[TMP24:%.*]] = fadd double [[TMP22]], [[TMP23]]
// PRMTD-NEXT:    [[TMP25:%.*]] = fmul double [[EXT5]], [[EXT5]]
// PRMTD-NEXT:    [[TMP26:%.*]] = fmul double [[EXT6]], [[EXT6]]
// PRMTD-NEXT:    [[TMP27:%.*]] = fadd double [[TMP25]], [[TMP26]]
// PRMTD-NEXT:    [[TMP28:%.*]] = fmul double [[EXT4]], [[EXT5]]
// PRMTD-NEXT:    [[TMP29:%.*]] = fmul double [[EXT]], [[EXT6]]
// PRMTD-NEXT:    [[TMP30:%.*]] = fsub double [[TMP28]], [[TMP29]]
// PRMTD-NEXT:    [[TMP31:%.*]] = fdiv double [[TMP24]], [[TMP27]]
// PRMTD-NEXT:    [[TMP32:%.*]] = fdiv double [[TMP30]], [[TMP27]]
// PRMTD-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP31]] to float
// PRMTD-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc double [[TMP32]] to float
// PRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD-NEXT:    store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD-NEXT:    [[TMP33:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD-NEXT:    ret <2 x float> [[TMP33]]
//
// X86WINPRMTD-LABEL: define dso_local i64 @f1(
// X86WINPRMTD-SAME: i64 noundef [[A_COERCE:%.*]], ptr noundef [[B:%.*]], i64 noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT:  entry:
// X86WINPRMTD-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT:    store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD-NEXT:    store i64 [[C_COERCE]], ptr [[C]], align 4
// X86WINPRMTD-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// X86WINPRMTD-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to double
// X86WINPRMTD-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to double
// X86WINPRMTD-NEXT:    [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[CONV]])
// X86WINPRMTD-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[CONV1]])
// X86WINPRMTD-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt double [[TMP0]], [[TMP1]]
// X86WINPRMTD-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD:       abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD-NEXT:    [[TMP2:%.*]] = fdiv double [[CONV1]], [[CONV]]
// X86WINPRMTD-NEXT:    [[TMP3:%.*]] = fmul double [[TMP2]], [[CONV1]]
// X86WINPRMTD-NEXT:    [[TMP4:%.*]] = fadd double [[CONV]], [[TMP3]]
// X86WINPRMTD-NEXT:    [[TMP5:%.*]] = fmul double [[B_IMAG]], [[TMP2]]
// X86WINPRMTD-NEXT:    [[TMP6:%.*]] = fadd double [[B_REAL]], [[TMP5]]
// X86WINPRMTD-NEXT:    [[TMP7:%.*]] = fdiv double [[TMP6]], [[TMP4]]
// X86WINPRMTD-NEXT:    [[TMP8:%.*]] = fmul double [[B_REAL]], [[TMP2]]
// X86WINPRMTD-NEXT:    [[TMP9:%.*]] = fsub double [[B_IMAG]], [[TMP8]]
// X86WINPRMTD-NEXT:    [[TMP10:%.*]] = fdiv double [[TMP9]], [[TMP4]]
// X86WINPRMTD-NEXT:    br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD:       abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD-NEXT:    [[TMP11:%.*]] = fdiv double [[CONV]], [[CONV1]]
// X86WINPRMTD-NEXT:    [[TMP12:%.*]] = fmul double [[TMP11]], [[CONV]]
// X86WINPRMTD-NEXT:    [[TMP13:%.*]] = fadd double [[CONV1]], [[TMP12]]
// X86WINPRMTD-NEXT:    [[TMP14:%.*]] = fmul double [[B_REAL]], [[TMP11]]
// X86WINPRMTD-NEXT:    [[TMP15:%.*]] = fadd double [[TMP14]], [[B_IMAG]]
// X86WINPRMTD-NEXT:    [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP13]]
// X86WINPRMTD-NEXT:    [[TMP17:%.*]] = fmul double [[B_IMAG]], [[TMP11]]
// X86WINPRMTD-NEXT:    [[TMP18:%.*]] = fsub double [[TMP17]], [[B_REAL]]
// X86WINPRMTD-NEXT:    [[TMP19:%.*]] = fdiv double [[TMP18]], [[TMP13]]
// X86WINPRMTD-NEXT:    br label [[COMPLEX_DIV]]
// X86WINPRMTD:       complex_div:
// X86WINPRMTD-NEXT:    [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT:    [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT:    [[CONV2:%.*]] = fptrunc double [[TMP20]] to float
// X86WINPRMTD-NEXT:    [[CONV3:%.*]] = fptrunc double [[TMP21]] to float
// X86WINPRMTD-NEXT:    [[EXT:%.*]] = fpext float [[CONV2]] to double
// X86WINPRMTD-NEXT:    [[EXT4:%.*]] = fpext float [[CONV3]] to double
// X86WINPRMTD-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// X86WINPRMTD-NEXT:    [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// X86WINPRMTD-NEXT:    [[TMP22:%.*]] = fmul double [[EXT]], [[EXT5]]
// X86WINPRMTD-NEXT:    [[TMP23:%.*]] = fmul double [[EXT4]], [[EXT6]]
// X86WINPRMTD-NEXT:    [[TMP24:%.*]] = fadd double [[TMP22]], [[TMP23]]
// X86WINPRMTD-NEXT:    [[TMP25:%.*]] = fmul double [[EXT5]], [[EXT5]]
// X86WINPRMTD-NEXT:    [[TMP26:%.*]] = fmul double [[EXT6]], [[EXT6]]
// X86WINPRMTD-NEXT:    [[TMP27:%.*]] = fadd double [[TMP25]], [[TMP26]]
// X86WINPRMTD-NEXT:    [[TMP28:%.*]] = fmul double [[EXT4]], [[EXT5]]
// X86WINPRMTD-NEXT:    [[TMP29:%.*]] = fmul double [[EXT]], [[EXT6]]
// X86WINPRMTD-NEXT:    [[TMP30:%.*]] = fsub double [[TMP28]], [[TMP29]]
// X86WINPRMTD-NEXT:    [[TMP31:%.*]] = fdiv double [[TMP24]], [[TMP27]]
// X86WINPRMTD-NEXT:    [[TMP32:%.*]] = fdiv double [[TMP30]], [[TMP27]]
// X86WINPRMTD-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP31]] to float
// X86WINPRMTD-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc double [[TMP32]] to float
// X86WINPRMTD-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD-NEXT:    store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD-NEXT:    [[TMP33:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD-NEXT:    ret i64 [[TMP33]]
//
// AVRFP32-LABEL: define dso_local { float, float } @f1(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]], float noundef [[C_COERCE0:%.*]], float noundef [[C_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT:  entry:
// AVRFP32-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[C:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP32-NEXT:    store float [[C_COERCE0]], ptr [[TMP4]], align 1
// AVRFP32-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[C_COERCE1]], ptr [[TMP5]], align 1
// AVRFP32-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT:    [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT:    [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP32-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 1
// AVRFP32-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP32-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP6:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[C_REAL]])
// AVRFP32-NEXT:    [[TMP7:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[C_IMAG]])
// AVRFP32-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt float [[TMP6]], [[TMP7]]
// AVRFP32-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP32:       abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP32-NEXT:    [[TMP8:%.*]] = fdiv float [[C_IMAG]], [[C_REAL]]
// AVRFP32-NEXT:    [[TMP9:%.*]] = fmul float [[TMP8]], [[C_IMAG]]
// AVRFP32-NEXT:    [[TMP10:%.*]] = fadd float [[C_REAL]], [[TMP9]]
// AVRFP32-NEXT:    [[TMP11:%.*]] = fmul float [[B_IMAG]], [[TMP8]]
// AVRFP32-NEXT:    [[TMP12:%.*]] = fadd float [[B_REAL]], [[TMP11]]
// AVRFP32-NEXT:    [[TMP13:%.*]] = fdiv float [[TMP12]], [[TMP10]]
// AVRFP32-NEXT:    [[TMP14:%.*]] = fmul float [[B_REAL]], [[TMP8]]
// AVRFP32-NEXT:    [[TMP15:%.*]] = fsub float [[B_IMAG]], [[TMP14]]
// AVRFP32-NEXT:    [[TMP16:%.*]] = fdiv float [[TMP15]], [[TMP10]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV:%.*]]
// AVRFP32:       abs_rhsr_less_than_abs_rhsi:
// AVRFP32-NEXT:    [[TMP17:%.*]] = fdiv float [[C_REAL]], [[C_IMAG]]
// AVRFP32-NEXT:    [[TMP18:%.*]] = fmul float [[TMP17]], [[C_REAL]]
// AVRFP32-NEXT:    [[TMP19:%.*]] = fadd float [[C_IMAG]], [[TMP18]]
// AVRFP32-NEXT:    [[TMP20:%.*]] = fmul float [[B_REAL]], [[TMP17]]
// AVRFP32-NEXT:    [[TMP21:%.*]] = fadd float [[TMP20]], [[B_IMAG]]
// AVRFP32-NEXT:    [[TMP22:%.*]] = fdiv float [[TMP21]], [[TMP19]]
// AVRFP32-NEXT:    [[TMP23:%.*]] = fmul float [[B_IMAG]], [[TMP17]]
// AVRFP32-NEXT:    [[TMP24:%.*]] = fsub float [[TMP23]], [[B_REAL]]
// AVRFP32-NEXT:    [[TMP25:%.*]] = fdiv float [[TMP24]], [[TMP19]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV]]
// AVRFP32:       complex_div:
// AVRFP32-NEXT:    [[TMP26:%.*]] = phi float [ [[TMP13]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP22]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[TMP27:%.*]] = phi float [ [[TMP16]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP25]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP28:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[A_REAL]])
// AVRFP32-NEXT:    [[TMP29:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[A_IMAG]])
// AVRFP32-NEXT:    [[ABS_CMP1:%.*]] = fcmp ugt float [[TMP28]], [[TMP29]]
// AVRFP32-NEXT:    br i1 [[ABS_CMP1]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI2:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI3:%.*]]
// AVRFP32:       abs_rhsr_greater_or_equal_abs_rhsi2:
// AVRFP32-NEXT:    [[TMP30:%.*]] = fdiv float [[A_IMAG]], [[A_REAL]]
// AVRFP32-NEXT:    [[TMP31:%.*]] = fmul float [[TMP30]], [[A_IMAG]]
// AVRFP32-NEXT:    [[TMP32:%.*]] = fadd float [[A_REAL]], [[TMP31]]
// AVRFP32-NEXT:    [[TMP33:%.*]] = fmul float [[TMP27]], [[TMP30]]
// AVRFP32-NEXT:    [[TMP34:%.*]] = fadd float [[TMP26]], [[TMP33]]
// AVRFP32-NEXT:    [[TMP35:%.*]] = fdiv float [[TMP34]], [[TMP32]]
// AVRFP32-NEXT:    [[TMP36:%.*]] = fmul float [[TMP26]], [[TMP30]]
// AVRFP32-NEXT:    [[TMP37:%.*]] = fsub float [[TMP27]], [[TMP36]]
// AVRFP32-NEXT:    [[TMP38:%.*]] = fdiv float [[TMP37]], [[TMP32]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV4:%.*]]
// AVRFP32:       abs_rhsr_less_than_abs_rhsi3:
// AVRFP32-NEXT:    [[TMP39:%.*]] = fdiv float [[A_REAL]], [[A_IMAG]]
// AVRFP32-NEXT:    [[TMP40:%.*]] = fmul float [[TMP39]], [[A_REAL]]
// AVRFP32-NEXT:    [[TMP41:%.*]] = fadd float [[A_IMAG]], [[TMP40]]
// AVRFP32-NEXT:    [[TMP42:%.*]] = fmul float [[TMP26]], [[TMP39]]
// AVRFP32-NEXT:    [[TMP43:%.*]] = fadd float [[TMP42]], [[TMP27]]
// AVRFP32-NEXT:    [[TMP44:%.*]] = fdiv float [[TMP43]], [[TMP41]]
// AVRFP32-NEXT:    [[TMP45:%.*]] = fmul float [[TMP27]], [[TMP39]]
// AVRFP32-NEXT:    [[TMP46:%.*]] = fsub float [[TMP45]], [[TMP26]]
// AVRFP32-NEXT:    [[TMP47:%.*]] = fdiv float [[TMP46]], [[TMP41]]
// AVRFP32-NEXT:    br label [[COMPLEX_DIV4]]
// AVRFP32:       complex_div4:
// AVRFP32-NEXT:    [[TMP48:%.*]] = phi float [ [[TMP35]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI2]] ], [ [[TMP44]], [[ABS_RHSR_LESS_THAN_ABS_RHSI3]] ]
// AVRFP32-NEXT:    [[TMP49:%.*]] = phi float [ [[TMP38]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI2]] ], [ [[TMP47]], [[ABS_RHSR_LESS_THAN_ABS_RHSI3]] ]
// AVRFP32-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT:    store float [[TMP48]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT:    store float [[TMP49]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT:    [[TMP50:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT:    ret { float, float } [[TMP50]]
//
// AVRFP64-LABEL: define dso_local { float, float } @f1(
// AVRFP64-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]], float noundef [[C_COERCE0:%.*]], float noundef [[C_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT:  entry:
// AVRFP64-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[A:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[B:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT:    [[C:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    store double [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    store double [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP64-NEXT:    store float [[C_COERCE0]], ptr [[TMP4]], align 1
// AVRFP64-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[C_COERCE1]], ptr [[TMP5]], align 1
// AVRFP64-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
// AVRFP64-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP64-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 1
// AVRFP64-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP64-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 1
// AVRFP64-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to double
// AVRFP64-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to double
// AVRFP64-NEXT:    [[TMP6:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[CONV]])
// AVRFP64-NEXT:    [[TMP7:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[CONV1]])
// AVRFP64-NEXT:    [[ABS_CMP:%.*]] = fcmp ugt double [[TMP6]], [[TMP7]]
// AVRFP64-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP64:       abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP64-NEXT:    [[TMP8:%.*]] = fdiv double [[CONV1]], [[CONV]]
// AVRFP64-NEXT:    [[TMP9:%.*]] = fmul double [[TMP8]], [[CONV1]]
// AVRFP64-NEXT:    [[TMP10:%.*]] = fadd double [[CONV]], [[TMP9]]
// AVRFP64-NEXT:    [[TMP11:%.*]] = fmul double [[B_IMAG]], [[TMP8]]
// AVRFP64-NEXT:    [[TMP12:%.*]] = fadd double [[B_REAL]], [[TMP11]]
// AVRFP64-NEXT:    [[TMP13:%.*]] = fdiv double [[TMP12]], [[TMP10]]
// AVRFP64-NEXT:    [[TMP14:%.*]] = fmul double [[B_REAL]], [[TMP8]]
// AVRFP64-NEXT:    [[TMP15:%.*]] = fsub double [[B_IMAG]], [[TMP14]]
// AVRFP64-NEXT:    [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP10]]
// AVRFP64-NEXT:    br label [[COMPLEX_DIV:%.*]]
// AVRFP64:       abs_rhsr_less_than_abs_rhsi:
// AVRFP64-NEXT:    [[TMP17:%.*]] = fdiv double [[CONV]], [[CONV1]]
// AVRFP64-NEXT:    [[TMP18:%.*]] = fmul double [[TMP17]], [[CONV]]
// AVRFP64-NEXT:    [[TMP19:%.*]] = fadd double [[CONV1]], [[TMP18]]
// AVRFP64-NEXT:    [[TMP20:%.*]] = fmul double [[B_REAL]], [[TMP17]]
// AVRFP64-NEXT:    [[TMP21:%.*]] = fadd double [[TMP20]], [[B_IMAG]]
// AVRFP64-NEXT:    [[TMP22:%.*]] = fdiv double [[TMP21]], [[TMP19]]
// AVRFP64-NEXT:    [[TMP23:%.*]] = fmul double [[B_IMAG]], [[TMP17]]
// AVRFP64-NEXT:    [[TMP24:%.*]] = fsub double [[TMP23]], [[B_REAL]]
// AVRFP64-NEXT:    [[TMP25:%.*]] = fdiv double [[TMP24]], [[TMP19]]
// AVRFP64-NEXT:    br label [[COMPLEX_DIV]]
// AVRFP64:       complex_div:
// AVRFP64-NEXT:    [[TMP26:%.*]] = phi double [ [[TMP13]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP22]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT:    [[TMP27:%.*]] = phi double [ [[TMP16]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP25]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT:    [[CONV2:%.*]] = fptrunc double [[TMP26]] to float
// AVRFP64-NEXT:    [[CONV3:%.*]] = fptrunc double [[TMP27]] to float
// AVRFP64-NEXT:    [[EXT:%.*]] = fpext float [[CONV2]] to double
// AVRFP64-NEXT:    [[EXT4:%.*]] = fpext float [[CONV3]] to double
// AVRFP64-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP64-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT:    [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// AVRFP64-NEXT:    [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// AVRFP64-NEXT:    [[TMP28:%.*]] = fmul double [[EXT]], [[EXT5]]
// AVRFP64-NEXT:    [[TMP29:%.*]] = fmul double [[EXT4]], [[EXT6]]
// AVRFP64-NEXT:    [[TMP30:%.*]] = fadd double [[TMP28]], [[TMP29]]
// AVRFP64-NEXT:    [[TMP31:%.*]] = fmul double [[EXT5]], [[EXT5]]
// AVRFP64-NEXT:    [[TMP32:%.*]] = fmul double [[EXT6]], [[EXT6]]
// AVRFP64-NEXT:    [[TMP33:%.*]] = fadd double [[TMP31]], [[TMP32]]
// AVRFP64-NEXT:    [[TMP34:%.*]] = fmul double [[EXT4]], [[EXT5]]
// AVRFP64-NEXT:    [[TMP35:%.*]] = fmul double [[EXT]], [[EXT6]]
// AVRFP64-NEXT:    [[TMP36:%.*]] = fsub double [[TMP34]], [[TMP35]]
// AVRFP64-NEXT:    [[TMP37:%.*]] = fdiv double [[TMP30]], [[TMP33]]
// AVRFP64-NEXT:    [[TMP38:%.*]] = fdiv double [[TMP36]], [[TMP33]]
// AVRFP64-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP37]] to float
// AVRFP64-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc double [[TMP38]] to float
// AVRFP64-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP64-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP64-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 1
// AVRFP64-NEXT:    store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP64-NEXT:    [[TMP39:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP64-NEXT:    ret { float, float } [[TMP39]]
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// BASIC_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// BASIC_FAST-NEXT:  entry:
// BASIC_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC_FAST-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// BASIC_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC_FAST-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// BASIC_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// BASIC_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// BASIC_FAST-NEXT:    [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[CONV]]
// BASIC_FAST-NEXT:    [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[CONV1]]
// BASIC_FAST-NEXT:    [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]], [[TMP1]]
// BASIC_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[CONV]]
// BASIC_FAST-NEXT:    [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[CONV1]]
// BASIC_FAST-NEXT:    [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP3]], [[TMP4]]
// BASIC_FAST-NEXT:    [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[CONV]]
// BASIC_FAST-NEXT:    [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[CONV1]]
// BASIC_FAST-NEXT:    [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP7]]
// BASIC_FAST-NEXT:    [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[TMP5]]
// BASIC_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP8]], [[TMP5]]
// BASIC_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP9]] to float
// BASIC_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP10]] to float
// BASIC_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[TMP11:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[A_REAL]]
// BASIC_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[A_IMAG]]
// BASIC_FAST-NEXT:    [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP11]], [[TMP12]]
// BASIC_FAST-NEXT:    [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[A_REAL]]
// BASIC_FAST-NEXT:    [[TMP15:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[A_IMAG]]
// BASIC_FAST-NEXT:    [[TMP16:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP14]], [[TMP15]]
// BASIC_FAST-NEXT:    [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[A_REAL]]
// BASIC_FAST-NEXT:    [[TMP18:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[A_IMAG]]
// BASIC_FAST-NEXT:    [[TMP19:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP17]], [[TMP18]]
// BASIC_FAST-NEXT:    [[TMP20:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP13]], [[TMP16]]
// BASIC_FAST-NEXT:    [[TMP21:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP19]], [[TMP16]]
// BASIC_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT:    store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// BASIC_FAST-NEXT:    store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// BASIC_FAST-NEXT:    [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC_FAST-NEXT:    ret <2 x float> [[TMP22]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// FULL_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// FULL_FAST-NEXT:  entry:
// FULL_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL_FAST-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// FULL_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL_FAST-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// FULL_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// FULL_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// FULL_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// FULL_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// FULL_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// FULL_FAST-NEXT:    [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[CONV]], x86_fp80 noundef nofpclass(nan inf) [[CONV1]]) #[[ATTR2]]
// FULL_FAST-NEXT:    [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL_FAST-NEXT:    [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP0]] to float
// FULL_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP1]] to float
// FULL_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL_FAST-NEXT:    [[CALL4:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__divsc3(float noundef nofpclass(nan inf) [[CONV2]], float noundef nofpclass(nan inf) [[CONV3]], float noundef nofpclass(nan inf) [[A_REAL]], float noundef nofpclass(nan inf) [[A_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT:    store <2 x float> [[CALL4]], ptr [[COERCE]], align 4
// FULL_FAST-NEXT:    [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL_FAST-NEXT:    [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL_FAST-NEXT:    [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL_FAST-NEXT:    [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT:    store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL_FAST-NEXT:    store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL_FAST-NEXT:    [[TMP2:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL_FAST-NEXT:    ret <2 x float> [[TMP2]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// IMPRVD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// IMPRVD_FAST-NEXT:  entry:
// IMPRVD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD_FAST-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// IMPRVD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD_FAST-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// IMPRVD_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// IMPRVD_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// IMPRVD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// IMPRVD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// IMPRVD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD_FAST-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST:       abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[CONV]]
// IMPRVD_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[CONV1]]
// IMPRVD_FAST-NEXT:    [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[TMP3]]
// IMPRVD_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP2]]
// IMPRVD_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP5]]
// IMPRVD_FAST-NEXT:    [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD_FAST-NEXT:    [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP2]]
// IMPRVD_FAST-NEXT:    [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP8]]
// IMPRVD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST:       abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT:    [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[CONV1]]
// IMPRVD_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[CONV]]
// IMPRVD_FAST-NEXT:    [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[TMP12]]
// IMPRVD_FAST-NEXT:    [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP11]]
// IMPRVD_FAST-NEXT:    [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[B_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD_FAST-NEXT:    [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP11]]
// IMPRVD_FAST-NEXT:    [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[B_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV]]
// IMPRVD_FAST:       complex_div:
// IMPRVD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// IMPRVD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// IMPRVD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[TMP22:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[A_REAL]])
// IMPRVD_FAST-NEXT:    [[TMP23:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[A_IMAG]])
// IMPRVD_FAST-NEXT:    [[ABS_CMP4:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt float [[TMP22]], [[TMP23]]
// IMPRVD_FAST-NEXT:    br i1 [[ABS_CMP4]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI6:%.*]]
// IMPRVD_FAST:       abs_rhsr_greater_or_equal_abs_rhsi5:
// IMPRVD_FAST-NEXT:    [[TMP24:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[A_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP25:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP24]], [[A_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP26:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP25]]
// IMPRVD_FAST-NEXT:    [[TMP27:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[TMP24]]
// IMPRVD_FAST-NEXT:    [[TMP28:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[CONV2]], [[TMP27]]
// IMPRVD_FAST-NEXT:    [[TMP29:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP28]], [[TMP26]]
// IMPRVD_FAST-NEXT:    [[TMP30:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[TMP24]]
// IMPRVD_FAST-NEXT:    [[TMP31:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[CONV3]], [[TMP30]]
// IMPRVD_FAST-NEXT:    [[TMP32:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP31]], [[TMP26]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV7:%.*]]
// IMPRVD_FAST:       abs_rhsr_less_than_abs_rhsi6:
// IMPRVD_FAST-NEXT:    [[TMP33:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[A_IMAG]]
// IMPRVD_FAST-NEXT:    [[TMP34:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP33]], [[A_REAL]]
// IMPRVD_FAST-NEXT:    [[TMP35:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP34]]
// IMPRVD_FAST-NEXT:    [[TMP36:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[TMP33]]
// IMPRVD_FAST-NEXT:    [[TMP37:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP36]], [[CONV3]]
// IMPRVD_FAST-NEXT:    [[TMP38:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP37]], [[TMP35]]
// IMPRVD_FAST-NEXT:    [[TMP39:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[TMP33]]
// IMPRVD_FAST-NEXT:    [[TMP40:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP39]], [[CONV2]]
// IMPRVD_FAST-NEXT:    [[TMP41:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP40]], [[TMP35]]
// IMPRVD_FAST-NEXT:    br label [[COMPLEX_DIV7]]
// IMPRVD_FAST:       complex_div7:
// IMPRVD_FAST-NEXT:    [[TMP42:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP29]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP38]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
// IMPRVD_FAST-NEXT:    [[TMP43:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP32]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP41]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
// IMPRVD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT:    store float [[TMP42]], ptr [[RETVAL_REALP]], align 4
// IMPRVD_FAST-NEXT:    store float [[TMP43]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD_FAST-NEXT:    [[TMP44:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD_FAST-NEXT:    ret <2 x float> [[TMP44]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// PRMTD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD_FAST-NEXT:  entry:
// PRMTD_FAST-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_FAST-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// PRMTD_FAST-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_FAST-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_FAST-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// PRMTD_FAST-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// PRMTD_FAST-NEXT:    [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// PRMTD_FAST-NEXT:    [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// PRMTD_FAST-NEXT:    [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// PRMTD_FAST-NEXT:    [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
// PRMTD_FAST-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_FAST:       abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD_FAST-NEXT:    [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[CONV]]
// PRMTD_FAST-NEXT:    [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[CONV1]]
// PRMTD_FAST-NEXT:    [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[TMP3]]
// PRMTD_FAST-NEXT:    [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP2]]
// PRMTD_FAST-NEXT:    [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP5]]
// PRMTD_FAST-NEXT:    [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
// PRMTD_FAST-NEXT:    [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP2]]
// PRMTD_FAST-NEXT:    [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP8]]
// PRMTD_FAST-NEXT:    [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
// PRMTD_FAST-NEXT:    br label [[COMPLEX_DIV:%.*]]
// PRMTD_FAST:       abs_rhsr_less_than_abs_rhsi:
// PRMTD_FAST-NEXT:    [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[CONV1]]
// PRMTD_FAST-NEXT:    [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[CONV]]
// PRMTD_FAST-NEXT:    [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[TMP12]]
// PRMTD_FAST-NEXT:    [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP11]]
// PRMTD_FAST-NEXT:    [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[B_IMAG]]
// PRMTD_FAST-NEXT:    [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
// PRMTD_FAST-NEXT:    [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP11]]
// PRMTD_FAST-NEXT:    [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[B_REAL]]
// PRMTD_FAST-NEXT:    [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
// PRMTD_FAST-NEXT:    br label [[COMPLEX_DIV]]
// PRMTD_FAST:       complex_div:
// PRMTD_FAST-NEXT:    [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_FAST-NEXT:    [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_FAST-NEXT:    [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// PRMTD_FAST-NEXT:    [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// PRMTD_FAST-NEXT:    [[EXT:%.*]] = fpext float [[CONV2]] to double
// PRMTD_FAST-NEXT:    [[EXT4:%.*]] = fpext float [[CONV3]] to double
// PRMTD_FAST-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_FAST-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// PRMTD_FAST-NEXT:    [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD_FAST-NEXT:    [[TMP22:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT5]]
// PRMTD_FAST-NEXT:    [[TMP23:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT4]], [[EXT6]]
// PRMTD_FAST-NEXT:    [[TMP24:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP22]], [[TMP23]]
// PRMTD_FAST-NEXT:    [[TMP25:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT5]], [[EXT5]]
// PRMTD_FAST-NEXT:    [[TMP26:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT6]], [[EXT6]]
// PRMTD_FAST-NEXT:    [[TMP27:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP25]], [[TMP26]]
// PRMTD_FAST-NEXT:    [[TMP28:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT4]], [[EXT5]]
// PRMTD_FAST-NEXT:    [[TMP29:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT6]]
// PRMTD_FAST-NEXT:    [[TMP30:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP28]], [[TMP29]]
// PRMTD_FAST-NEXT:    [[TMP31:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP24]], [[TMP27]]
// PRMTD_FAST-NEXT:    [[TMP32:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP30]], [[TMP27]]
// PRMTD_FAST-NEXT:    [[UNPROMOTION:%.*]] = fptrunc double [[TMP31]] to float
// PRMTD_FAST-NEXT:    [[UNPROMOTION7:%.*]] = fptrunc double [[TMP32]] to float
// PRMTD_FAST-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_FAST-NEXT:    store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_FAST-NEXT:    [[TMP33:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_FAST-NEXT:    ret <2 x float> [[TMP33]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local i64 @f1(
// X86WINPRMTD_STRICT-SAME: i64 noundef [[A_COERCE:%.*]], ptr noundef [[B:%.*]], i64 noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT:  entry:
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT:    [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT:    store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD_STRICT-NEXT:    store i64 [[C_COERCE]], ptr [[C]], align 4
// X86WINPRMTD_STRICT-NEXT:    store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[C_REAL]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[C_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[CONV]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[CONV1]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT:       abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT:    [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[CONV1]], double [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP2]], double [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[CONV]], double [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_IMAG]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_REAL]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP6]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_REAL]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[B_IMAG]], double [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP9]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD_STRICT:       abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT:    [[TMP11:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[CONV]], double [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP11]], double [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[CONV1]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP14:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_REAL]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP16:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP15]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP17:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_IMAG]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP18:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP17]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP19:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP18]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV]]
// X86WINPRMTD_STRICT:       complex_div:
// X86WINPRMTD_STRICT-NEXT:    [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT:    [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT:    [[CONV2:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[CONV3:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV2]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[EXT4:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV3]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[EXT5:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[EXT6:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP22:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP23:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP24:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP22]], double [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP25:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT5]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP26:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT6]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP27:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP25]], double [[TMP26]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP29:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP30:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP28]], double [[TMP29]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP31:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP24]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[TMP32:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP30]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[UNPROMOTION7:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP32]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT:    store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT:    [[TMP33:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD_STRICT-NEXT:    ret i64 [[TMP33]]
//
// PRMTD_STRICT-LABEL: define dso_local <2 x float> @f1(
// PRMTD_STRICT-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD_STRICT-NEXT:  entry:
// PRMTD_STRICT-NEXT:    [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    [[C:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT:    store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_STRICT-NEXT:    store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// PRMTD_STRICT-NEXT:    [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_STRICT-NEXT:    [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_STRICT-NEXT:    [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// PRMTD_STRICT-NEXT:    [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[CONV:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float [[C_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[CONV1:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float [[C_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]]) #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]]) #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_STRICT:       abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD_STRICT-NEXT:    [[TMP2:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[CONV1]], x86_fp80 [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP3:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP2]], x86_fp80 [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP4:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[CONV]], x86_fp80 [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP5:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP6:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP7:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP6]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP8:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP9:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP10:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP9]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV:%.*]]
// PRMTD_STRICT:       abs_rhsr_less_than_abs_rhsi:
// PRMTD_STRICT-NEXT:    [[TMP11:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[CONV]], x86_fp80 [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP12:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP11]], x86_fp80 [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP13:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[CONV1]], x86_fp80 [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP14:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP15:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP14]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP16:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP15]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP17:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP18:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[TMP17]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP19:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP18]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    br label [[COMPLEX_DIV]]
// PRMTD_STRICT:       complex_div:
// PRMTD_STRICT-NEXT:    [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_STRICT-NEXT:    [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_STRICT-NEXT:    [[CONV2:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80 [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[CONV3:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80 [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV2]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[EXT4:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV3]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_STRICT-NEXT:    [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[EXT5:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[EXT6:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP22:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP23:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP24:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP22]], double [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP25:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT5]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP26:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT6]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP27:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP25]], double [[TMP26]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP29:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP30:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP28]], double [[TMP29]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP31:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP24]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[TMP32:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP30]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[UNPROMOTION7:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP32]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT:    [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT:    [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT:    store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_STRICT-NEXT:    store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_STRICT-NEXT:    [[TMP33:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_STRICT-NEXT:    ret <2 x float> [[TMP33]]
//
_Complex float f1(_Complex float a, _Complex long double b, _Complex float c) {
  return (_Complex float)(b / c) / a;
}
//.
// FULL: [[PROF2]] = !{!"branch_weights", i32 1, i32 1048575}
//.
// FULL_FAST: [[PROF2]] = !{!"branch_weights", i32 1, i32 1048575}
//.