; llvm/test/CodeGen/LoongArch/inline-asm-constraint-f.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+d --target-abi=ilp32d --verify-machineinstrs < %s \
; RUN:   | FileCheck --check-prefix=LA32 %s
; RUN: llc --mtriple=loongarch64 --mattr=+d --target-abi=lp64d --verify-machineinstrs < %s \
; RUN:   | FileCheck --check-prefix=LA64 %s

@gd = external dso_local global double

; The "f" constraint requests a floating-point register for each operand and
; for the result. The autogenerated checks confirm that on both LA32 (ilp32d)
; and LA64 (lp64d) the operands and result of the inline-asm fadd.d are
; allocated to $fa* registers ($fa0 in/out, $fa1 for the loaded global).
define double @constraint_f_double(double %a) nounwind {
; LA32-LABEL: constraint_f_double:
; LA32:       # %bb.0:
; LA32-NEXT:    pcalau12i $a0, %pc_hi20(gd)
; LA32-NEXT:    fld.d $fa1, $a0, %pc_lo12(gd)
; LA32-NEXT:    #APP
; LA32-NEXT:    fadd.d $fa0, $fa0, $fa1
; LA32-NEXT:    #NO_APP
; LA32-NEXT:    ret
;
; LA64-LABEL: constraint_f_double:
; LA64:       # %bb.0:
; LA64-NEXT:    pcalau12i $a0, %pc_hi20(gd)
; LA64-NEXT:    fld.d $fa1, $a0, %pc_lo12(gd)
; LA64-NEXT:    #APP
; LA64-NEXT:    fadd.d $fa0, $fa0, $fa1
; LA64-NEXT:    #NO_APP
; LA64-NEXT:    ret
  ; Load a second double so the asm has two distinct FP inputs.
  %1 = load double, ptr @gd
  %2 = tail call double asm "fadd.d $0, $1, $2", "=f,f,f"(double %a, double %1)
  ret double %2
}

; Explicit physical-register constraints {$r10}/{$r11} force the double
; argument into GPR $r11 (a7) and the result out of GPR $r10 (a6), even
; though the value lives in an FPR under the *d ABIs. The checks show the
; required FPR<->GPR transfers: on LA32 the 64-bit value round-trips
; through a stack slot (fst.d + ld.w / st.w + fld.d, since a 32-bit GPR
; holds only half of it), while on LA64 direct movfr2gr.d/movgr2fr.d
; moves suffice with no stack adjustment.
; NOTE(review): `sideeffect alignstack` keeps the asm from being deleted
; and marks it as requiring stack alignment (per LLVM LangRef).
define double @constraint_gpr(double %a) {
; LA32-LABEL: constraint_gpr:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    fst.d $fa0, $sp, 8
; LA32-NEXT:    ld.w $a7, $sp, 8
; LA32-NEXT:    ld.w $t0, $sp, 12
; LA32-NEXT:    #APP
; LA32-NEXT:    move $a6, $a7
; LA32-NEXT:    #NO_APP
; LA32-NEXT:    st.w $a7, $sp, 4
; LA32-NEXT:    st.w $a6, $sp, 0
; LA32-NEXT:    fld.d $fa0, $sp, 0
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: constraint_gpr:
; LA64:       # %bb.0:
; LA64-NEXT:    .cfi_def_cfa_offset 0
; LA64-NEXT:    movfr2gr.d $a7, $fa0
; LA64-NEXT:    #APP
; LA64-NEXT:    move $a6, $a7
; LA64-NEXT:    #NO_APP
; LA64-NEXT:    movgr2fr.d $fa0, $a6
; LA64-NEXT:    ret
  %1 = tail call double asm sideeffect alignstack "move $0, $1", "={$r10},{$r11}"(double %a)
  ret double %1
}