; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D
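
;; This file checks the lowering of atomicrmw fadd/fsub/fmin/fmax on float and
;; double at acquire, release and acq_rel orderings, once for LA64 with only
;; 32-bit FP (+f,-d) and once with 64-bit FP (+d).
;;
;; The float cases expand to a compare-exchange retry loop built from
;; ll.w/sc.w. The double cases are declared with `align 4`, which under-aligns
;; the 8-byte atomic, so both configurations fall back to the
;; __atomic_compare_exchange libcall; LA64F additionally needs soft-float
;; libcalls (__adddf3, fmin, fmax) because it has no 64-bit FP instructions.
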
define float @float_fadd_acquire(ptr %p) nounwind {
; LA64F-LABEL: float_fadd_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB0_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB0_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB0_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB0_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB0_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB0_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB0_3
; LA64F-NEXT: b .LBB0_6
; LA64F-NEXT: .LBB0_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB0_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB0_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB0_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB0_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fadd_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB0_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB0_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB0_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB0_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB0_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB0_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB0_3
; LA64D-NEXT: b .LBB0_6
; LA64D-NEXT: .LBB0_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB0_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB0_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB0_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB0_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, float 1.0 acquire, align 4
ret float %v
}
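
;; The float variants are expanded (by AtomicExpandPass) into an outer
;; compare-exchange retry loop, and the cmpxchg itself is then lowered to an
;; inner ll.w/sc.w loop, which is why two loop depths are annotated above.
;; A hedged IR-level sketch of the outer loop (the %atomicrmw.start and
;; %atomicrmw.end labels in the generated comments come from this expansion):
;;
;;   %init = load float, ptr %p
;;   br label %atomicrmw.start
;; atomicrmw.start:
;;   %loaded = phi float [ %init, %0 ], [ %new.loaded, %atomicrmw.start ]
;;   %new = fadd float %loaded, 1.000000e+00
;;   ; bitcast %loaded/%new to i32, then:
;;   ; cmpxchg ptr %p, i32 %loaded.i, i32 %new.i acquire acquire
;;   br i1 %success, label %atomicrmw.end, label %atomicrmw.start
;;
;; The float constant 1.0 is materialized inline: "addi.w $a1, $zero, 1" plus
;; movgr2fr.w/ffint.s.w converts integer 1 to float. On the ll/sc failure
;; path (.LBB0_5) a "dbar 20" hint is emitted, which appears to provide the
;; acquire barrier when the comparison fails and sc.w never executes.
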
define float @float_fsub_acquire(ptr %p) nounwind {
; LA64F-LABEL: float_fsub_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0)
; LA64F-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI1_0)
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB1_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB1_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB1_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB1_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB1_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB1_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB1_3
; LA64F-NEXT: b .LBB1_6
; LA64F-NEXT: .LBB1_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB1_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB1_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB1_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB1_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fsub_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a1, %pc_hi20(.LCPI1_0)
; LA64D-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI1_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB1_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB1_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB1_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB1_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB1_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB1_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB1_3
; LA64D-NEXT: b .LBB1_6
; LA64D-NEXT: .LBB1_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB1_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB1_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB1_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB1_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, float 1.0 acquire, align 4
ret float %v
}
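
;; fsub with a constant operand is lowered as an fadd of the negated constant:
;; both configurations load a single-precision constant (presumably -1.0) from
;; the constant pool entry .LCPI1_0 via the pcalau12i + %pc_lo12 PC-relative
;; addressing pair, then reuse the same fadd.s-based loop as above.
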
define float @float_fmin_acquire(ptr %p) nounwind {
; LA64F-LABEL: float_fmin_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB2_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB2_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB2_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB2_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB2_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB2_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB2_3
; LA64F-NEXT: b .LBB2_6
; LA64F-NEXT: .LBB2_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB2_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB2_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB2_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB2_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmin_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB2_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB2_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB2_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB2_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB2_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB2_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB2_3
; LA64D-NEXT: b .LBB2_6
; LA64D-NEXT: .LBB2_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB2_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB2_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB2_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB2_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, float 1.0 acquire, align 4
ret float %v
}

define float @float_fmax_acquire(ptr %p) nounwind {
; LA64F-LABEL: float_fmax_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB3_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB3_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB3_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB3_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB3_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB3_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB3_3
; LA64F-NEXT: b .LBB3_6
; LA64F-NEXT: .LBB3_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB3_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB3_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB3_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB3_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmax_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB3_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB3_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB3_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB3_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB3_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB3_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB3_3
; LA64D-NEXT: b .LBB3_6
; LA64D-NEXT: .LBB3_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB3_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB3_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB3_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB3_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, float 1.0 acquire, align 4
ret float %v
}
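
;; For fmin/fmax the loaded value is first passed through
;; "fmax.s $fa2, $fa0, $fa0". fmax of a value with itself returns that value
;; but quiets a signaling NaN, which appears to be how the expansion satisfies
;; the IEEE-754 minNum/maxNum semantics that atomicrmw fmin/fmax require.
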
define double @double_fadd_acquire(ptr %p) nounwind {
; LA64F-LABEL: double_fadd_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB4_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 2
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB4_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fadd_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB4_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 2
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB4_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, double 1.0 acquire, align 4
ret double %v
}
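
;; For double the atomic is only 4-byte aligned, so it is not lock-free and
;; the cmpxchg inside the retry loop becomes a call to the generic libatomic
;; routine, whose signature is:
;;
;;   bool __atomic_compare_exchange(size_t size, void *ptr, void *expected,
;;                                  void *desired, int success, int failure);
;;
;; Matching the code above: $a0 = 8 (size), $a1 = %p, $a2/$a3 point at stack
;; slots holding the expected and desired values, and $a4/$a5 carry the C
;; memory-order encodings (2 = acquire for both success and failure here).
;; On failure the routine updates the expected slot, which the loop reloads
;; before retrying. LA64F keeps the double in GPRs and adds via the soft-float
;; helper __adddf3; "lu52i.d $s0, $zero, 1023" builds the bit pattern
;; 0x3FF0000000000000, i.e. double 1.0. LA64D computes the sum inline with
;; fadd.d, materializing 1.0 through movgr2fr.d/ffint.d.l.
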
define double @double_fsub_acquire(ptr %p) nounwind {
; LA64F-LABEL: double_fsub_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB5_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 2
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB5_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fsub_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI5_0)
; LA64D-NEXT: fld.d $fs0, $a0, %pc_lo12(.LCPI5_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB5_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 2
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB5_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, double 1.0 acquire, align 4
ret double %v
}
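
;; The double fsub again becomes an fadd of -1.0: LA64D loads the constant
;; from .LCPI5_0, while LA64F materializes it directly with
;; "lu52i.d $s0, $zero, -1025", giving the bit pattern 0xBFF0000000000000
;; (double -1.0), before calling __adddf3.
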
define double @double_fmin_acquire(ptr %p) nounwind {
; LA64F-LABEL: double_fmin_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB6_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmin)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 2
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB6_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmin_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB6_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmin.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 2
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB6_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, double 1.0 acquire, align 4
ret double %v
}
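
;; For double fmin/fmax, LA64F has no 64-bit FP instructions and calls the
;; libm fmin/fmax routines, whose IEEE-754 minNum/maxNum behaviour matches
;; atomicrmw fmin/fmax. LA64D uses the same inline pattern as the float case:
;; quiet any signaling NaN with "fmax.d $fa1, $fa0, $fa0", then fmin.d/fmax.d.
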
define double @double_fmax_acquire(ptr %p) nounwind {
; LA64F-LABEL: double_fmax_acquire:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB7_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmax)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 2
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB7_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmax_acquire:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB7_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmax.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 2
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB7_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, double 1.0 acquire, align 4
ret double %v
}

define float @float_fadd_release(ptr %p) nounwind {
; LA64F-LABEL: float_fadd_release:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB8_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB8_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB8_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB8_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB8_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB8_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB8_3
; LA64F-NEXT: b .LBB8_6
; LA64F-NEXT: .LBB8_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB8_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB8_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB8_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB8_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fadd_release:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB8_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB8_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB8_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB8_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB8_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB8_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB8_3
; LA64D-NEXT: b .LBB8_6
; LA64D-NEXT: .LBB8_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB8_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB8_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB8_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB8_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, float 1.0 release, align 4
ret float %v
}
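
;; The release variants differ from the acquire ones only on the ll/sc failure
;; path: "dbar 1792" is emitted instead of "dbar 20". Hint 1792 appears to be
;; the weaker barrier used when no acquire ordering is needed on failure.
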
define float @float_fsub_release(ptr %p) nounwind {
; LA64F-LABEL: float_fsub_release:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: pcalau12i $a1, %pc_hi20(.LCPI9_0)
; LA64F-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI9_0)
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB9_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB9_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB9_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB9_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB9_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB9_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB9_3
; LA64F-NEXT: b .LBB9_6
; LA64F-NEXT: .LBB9_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB9_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB9_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB9_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB9_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fsub_release:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a1, %pc_hi20(.LCPI9_0)
; LA64D-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI9_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB9_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB9_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB9_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB9_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB9_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB9_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB9_3
; LA64D-NEXT: b .LBB9_6
; LA64D-NEXT: .LBB9_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB9_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB9_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB9_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB9_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, float 1.0 release, align 4
ret float %v
}

define float @float_fmin_release(ptr %p) nounwind {
; LA64F-LABEL: float_fmin_release:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB10_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB10_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB10_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB10_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB10_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB10_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB10_3
; LA64F-NEXT: b .LBB10_6
; LA64F-NEXT: .LBB10_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB10_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB10_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB10_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB10_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmin_release:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB10_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB10_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB10_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB10_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB10_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB10_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB10_3
; LA64D-NEXT: b .LBB10_6
; LA64D-NEXT: .LBB10_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB10_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB10_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB10_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB10_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, float 1.0 release, align 4
ret float %v
}

define float @float_fmax_release(ptr %p) nounwind {
; LA64F-LABEL: float_fmax_release:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB11_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB11_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB11_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB11_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB11_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB11_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB11_3
; LA64F-NEXT: b .LBB11_6
; LA64F-NEXT: .LBB11_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB11_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB11_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB11_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB11_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmax_release:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB11_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB11_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB11_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB11_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB11_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB11_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB11_3
; LA64D-NEXT: b .LBB11_6
; LA64D-NEXT: .LBB11_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB11_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB11_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB11_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB11_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, float 1.0 release, align 4
ret float %v
}

define double @double_fadd_release(ptr %p) nounwind {
; LA64F-LABEL: double_fadd_release:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB12_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 3
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB12_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fadd_release:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB12_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 3
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB12_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, double 1.0 release, align 4
ret double %v
}
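
;; In the release libcall variants the ordering arguments change accordingly:
;; $a4 = 3 (release) for the success ordering, and $a5 = 0 (relaxed, set with
;; "move $a5, $zero") for the failure ordering, since a failed compare-exchange
;; cannot have release semantics.
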
define double @double_fsub_release(ptr %p) nounwind {
; LA64F-LABEL: double_fsub_release:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB13_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 3
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB13_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fsub_release:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI13_0)
; LA64D-NEXT: fld.d $fs0, $a0, %pc_lo12(.LCPI13_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB13_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 3
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB13_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, double 1.0 release, align 4
ret double %v
}

define double @double_fmin_release(ptr %p) nounwind {
; LA64F-LABEL: double_fmin_release:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB14_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmin)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 3
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB14_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmin_release:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB14_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmin.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 3
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB14_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, double 1.0 release, align 4
ret double %v
}

define double @double_fmax_release(ptr %p) nounwind {
; LA64F-LABEL: double_fmax_release:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB15_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmax)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 3
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB15_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmax_release:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB15_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmax.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 3
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB15_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, double 1.0 release, align 4
ret double %v
}

define float @float_fadd_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: float_fadd_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB16_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB16_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB16_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB16_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB16_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB16_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB16_3
; LA64F-NEXT: b .LBB16_6
; LA64F-NEXT: .LBB16_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB16_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB16_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB16_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB16_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fadd_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB16_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB16_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB16_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB16_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB16_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB16_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB16_3
; LA64D-NEXT: b .LBB16_6
; LA64D-NEXT: .LBB16_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB16_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB16_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB16_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB16_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, float 1.0 acq_rel, align 4
ret float %v
}
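
;; The acq_rel float sequence is identical to the acquire one, including the
;; "dbar 20" on the failure path; presumably the ll.w/sc.w pair itself already
;; provides the release half of the ordering on LoongArch.
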
define float @float_fsub_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: float_fsub_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: pcalau12i $a1, %pc_hi20(.LCPI17_0)
; LA64F-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI17_0)
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB17_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB17_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB17_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB17_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB17_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB17_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB17_3
; LA64F-NEXT: b .LBB17_6
; LA64F-NEXT: .LBB17_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB17_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB17_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB17_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB17_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fsub_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a1, %pc_hi20(.LCPI17_0)
; LA64D-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI17_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB17_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB17_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB17_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB17_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB17_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB17_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB17_3
; LA64D-NEXT: b .LBB17_6
; LA64D-NEXT: .LBB17_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB17_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB17_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB17_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB17_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, float 1.0 acq_rel, align 4
ret float %v
}

define float @float_fmin_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: float_fmin_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB18_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB18_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB18_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB18_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB18_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB18_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB18_3
; LA64F-NEXT: b .LBB18_6
; LA64F-NEXT: .LBB18_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB18_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB18_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB18_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB18_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmin_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB18_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB18_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB18_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB18_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB18_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB18_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB18_3
; LA64D-NEXT: b .LBB18_6
; LA64D-NEXT: .LBB18_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB18_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB18_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB18_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB18_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, float 1.0 acq_rel, align 4
ret float %v
}

define float @float_fmax_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: float_fmax_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB19_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB19_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB19_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB19_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB19_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB19_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB19_3
; LA64F-NEXT: b .LBB19_6
; LA64F-NEXT: .LBB19_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB19_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB19_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB19_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB19_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmax_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB19_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB19_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB19_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB19_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB19_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB19_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB19_3
; LA64D-NEXT: b .LBB19_6
; LA64D-NEXT: .LBB19_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB19_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB19_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB19_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB19_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, float 1.0 acq_rel, align 4
ret float %v
}

define double @double_fadd_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: double_fadd_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB20_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 4
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB20_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fadd_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB20_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 4
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB20_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, double 1.0 acq_rel, align 4
ret double %v
}
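
;; For acq_rel the libcall orderings become $a4 = 4 (acq_rel) on success,
;; while the failure ordering in $a5 is dropped to 2 (acquire).
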
define double @double_fsub_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: double_fsub_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB21_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 4
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB21_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fsub_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI21_0)
; LA64D-NEXT: fld.d $fs0, $a0, %pc_lo12(.LCPI21_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB21_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 4
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB21_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, double 1.0 acq_rel, align 4
ret double %v
}
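; The fmin/fmax tests first recompute the loaded value with a self fmax
; (e.g. fmax.d $fa1, $fa0, $fa0), a canonicalization that quiets a possible
; signaling NaN before the IEEE fmin.d/fmax.d; LA64F has no double registers
; and calls the libm fmin/fmax routines instead.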
define double @double_fmin_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: double_fmin_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB22_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmin)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 4
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB22_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmin_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB22_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmin.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 4
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB22_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, double 1.0 acq_rel, align 4
ret double %v
}
define double @double_fmax_acq_rel(ptr %p) nounwind {
; LA64F-LABEL: double_fmax_acq_rel:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB23_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmax)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 4
; LA64F-NEXT: ori $a5, $zero, 2
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB23_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmax_acq_rel:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB23_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmax.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 4
; LA64D-NEXT: ori $a5, $zero, 2
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB23_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, double 1.0 acq_rel, align 4
ret double %v
}
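; The seq_cst tests keep the same shape as the stronger orderings above:
; floats expand to a nested loop (outer FP update, inner ll.w/sc.w
; compare-and-swap) with dbar 20 on the failed-compare path, and doubles go
; through __atomic_compare_exchange with both ordering arguments set to 5
; (seq_cst).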
define float @float_fadd_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: float_fadd_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB24_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB24_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB24_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB24_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB24_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB24_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB24_3
; LA64F-NEXT: b .LBB24_6
; LA64F-NEXT: .LBB24_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB24_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB24_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB24_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB24_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fadd_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB24_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB24_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB24_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB24_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB24_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB24_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB24_3
; LA64D-NEXT: b .LBB24_6
; LA64D-NEXT: .LBB24_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB24_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB24_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB24_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB24_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, float 1.0 seq_cst, align 4
ret float %v
}
define float @float_fsub_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: float_fsub_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: pcalau12i $a1, %pc_hi20(.LCPI25_0)
; LA64F-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI25_0)
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB25_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB25_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB25_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB25_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB25_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB25_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB25_3
; LA64F-NEXT: b .LBB25_6
; LA64F-NEXT: .LBB25_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB25_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB25_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB25_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB25_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fsub_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a1, %pc_hi20(.LCPI25_0)
; LA64D-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI25_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB25_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB25_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB25_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB25_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB25_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB25_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB25_3
; LA64D-NEXT: b .LBB25_6
; LA64D-NEXT: .LBB25_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB25_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB25_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB25_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB25_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, float 1.0 seq_cst, align 4
ret float %v
}
define float @float_fmin_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: float_fmin_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB26_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB26_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB26_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB26_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB26_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB26_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB26_3
; LA64F-NEXT: b .LBB26_6
; LA64F-NEXT: .LBB26_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB26_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB26_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB26_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB26_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmin_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB26_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB26_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB26_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB26_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB26_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB26_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB26_3
; LA64D-NEXT: b .LBB26_6
; LA64D-NEXT: .LBB26_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB26_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB26_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB26_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB26_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, float 1.0 seq_cst, align 4
ret float %v
}
define float @float_fmax_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: float_fmax_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB27_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB27_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB27_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB27_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB27_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB27_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB27_3
; LA64F-NEXT: b .LBB27_6
; LA64F-NEXT: .LBB27_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB27_1 Depth=1
; LA64F-NEXT: dbar 20
; LA64F-NEXT: .LBB27_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB27_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB27_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmax_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB27_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB27_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB27_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB27_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB27_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB27_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB27_3
; LA64D-NEXT: b .LBB27_6
; LA64D-NEXT: .LBB27_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB27_1 Depth=1
; LA64D-NEXT: dbar 20
; LA64D-NEXT: .LBB27_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB27_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB27_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, float 1.0 seq_cst, align 4
ret float %v
}
define double @double_fadd_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: double_fadd_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB28_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 5
; LA64F-NEXT: ori $a5, $zero, 5
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB28_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fadd_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB28_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 5
; LA64D-NEXT: ori $a5, $zero, 5
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB28_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, double 1.0 seq_cst, align 4
ret double %v
}
define double @double_fsub_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: double_fsub_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB29_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 5
; LA64F-NEXT: ori $a5, $zero, 5
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB29_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fsub_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI29_0)
; LA64D-NEXT: fld.d $fs0, $a0, %pc_lo12(.LCPI29_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB29_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 5
; LA64D-NEXT: ori $a5, $zero, 5
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB29_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, double 1.0 seq_cst, align 4
ret double %v
}
define double @double_fmin_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: double_fmin_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB30_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmin)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 5
; LA64F-NEXT: ori $a5, $zero, 5
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB30_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmin_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB30_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmin.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 5
; LA64D-NEXT: ori $a5, $zero, 5
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB30_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, double 1.0 seq_cst, align 4
ret double %v
}
define double @double_fmax_seq_cst(ptr %p) nounwind {
; LA64F-LABEL: double_fmax_seq_cst:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB31_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmax)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: ori $a4, $zero, 5
; LA64F-NEXT: ori $a5, $zero, 5
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB31_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmax_seq_cst:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB31_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmax.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: ori $a4, $zero, 5
; LA64D-NEXT: ori $a5, $zero, 5
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB31_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, double 1.0 seq_cst, align 4
ret double %v
}
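; The monotonic tests differ only in the ordering plumbing: the LL/SC failure
; path uses barrier hint dbar 1792 instead of dbar 20, and the double libcalls
; pass zeroed ordering arguments ($a4/$a5 = 0, i.e. relaxed).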
define float @float_fadd_monotonic(ptr %p) nounwind {
; LA64F-LABEL: float_fadd_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB32_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB32_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB32_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB32_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB32_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB32_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB32_3
; LA64F-NEXT: b .LBB32_6
; LA64F-NEXT: .LBB32_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB32_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB32_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB32_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB32_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fadd_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB32_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB32_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB32_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB32_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB32_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB32_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB32_3
; LA64D-NEXT: b .LBB32_6
; LA64D-NEXT: .LBB32_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB32_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB32_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB32_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB32_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, float 1.0 monotonic, align 4
ret float %v
}
define float @float_fsub_monotonic(ptr %p) nounwind {
; LA64F-LABEL: float_fsub_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: pcalau12i $a1, %pc_hi20(.LCPI33_0)
; LA64F-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI33_0)
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB33_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB33_3 Depth 2
; LA64F-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB33_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB33_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB33_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB33_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB33_3
; LA64F-NEXT: b .LBB33_6
; LA64F-NEXT: .LBB33_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB33_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB33_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB33_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB33_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fsub_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a1, %pc_hi20(.LCPI33_0)
; LA64D-NEXT: fld.s $fa1, $a1, %pc_lo12(.LCPI33_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB33_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB33_3 Depth 2
; LA64D-NEXT: fadd.s $fa2, $fa0, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB33_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB33_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB33_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB33_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB33_3
; LA64D-NEXT: b .LBB33_6
; LA64D-NEXT: .LBB33_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB33_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB33_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB33_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB33_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, float 1.0 monotonic, align 4
ret float %v
}
define float @float_fmin_monotonic(ptr %p) nounwind {
; LA64F-LABEL: float_fmin_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB34_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB34_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB34_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB34_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB34_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB34_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB34_3
; LA64F-NEXT: b .LBB34_6
; LA64F-NEXT: .LBB34_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB34_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB34_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB34_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB34_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmin_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB34_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB34_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmin.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB34_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB34_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB34_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB34_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB34_3
; LA64D-NEXT: b .LBB34_6
; LA64D-NEXT: .LBB34_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB34_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB34_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB34_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB34_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, float 1.0 monotonic, align 4
ret float %v
}
define float @float_fmax_monotonic(ptr %p) nounwind {
; LA64F-LABEL: float_fmax_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: fld.s $fa0, $a0, 0
; LA64F-NEXT: addi.w $a1, $zero, 1
; LA64F-NEXT: movgr2fr.w $fa1, $a1
; LA64F-NEXT: ffint.s.w $fa1, $fa1
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB35_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Loop Header: Depth=1
; LA64F-NEXT: # Child Loop BB35_3 Depth 2
; LA64F-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64F-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64F-NEXT: movfr2gr.s $a1, $fa2
; LA64F-NEXT: movfr2gr.s $a2, $fa0
; LA64F-NEXT: .LBB35_3: # %atomicrmw.start
; LA64F-NEXT: # Parent Loop BB35_1 Depth=1
; LA64F-NEXT: # => This Inner Loop Header: Depth=2
; LA64F-NEXT: ll.w $a3, $a0, 0
; LA64F-NEXT: bne $a3, $a2, .LBB35_5
; LA64F-NEXT: # %bb.4: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB35_3 Depth=2
; LA64F-NEXT: move $a4, $a1
; LA64F-NEXT: sc.w $a4, $a0, 0
; LA64F-NEXT: beqz $a4, .LBB35_3
; LA64F-NEXT: b .LBB35_6
; LA64F-NEXT: .LBB35_5: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB35_1 Depth=1
; LA64F-NEXT: dbar 1792
; LA64F-NEXT: .LBB35_6: # %atomicrmw.start
; LA64F-NEXT: # in Loop: Header=BB35_1 Depth=1
; LA64F-NEXT: movgr2fr.w $fa0, $a3
; LA64F-NEXT: bne $a3, $a2, .LBB35_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: ret
;
; LA64D-LABEL: float_fmax_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: fld.s $fa0, $a0, 0
; LA64D-NEXT: addi.w $a1, $zero, 1
; LA64D-NEXT: movgr2fr.w $fa1, $a1
; LA64D-NEXT: ffint.s.w $fa1, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB35_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Loop Header: Depth=1
; LA64D-NEXT: # Child Loop BB35_3 Depth 2
; LA64D-NEXT: fmax.s $fa2, $fa0, $fa0
; LA64D-NEXT: fmax.s $fa2, $fa2, $fa1
; LA64D-NEXT: movfr2gr.s $a1, $fa2
; LA64D-NEXT: movfr2gr.s $a2, $fa0
; LA64D-NEXT: .LBB35_3: # %atomicrmw.start
; LA64D-NEXT: # Parent Loop BB35_1 Depth=1
; LA64D-NEXT: # => This Inner Loop Header: Depth=2
; LA64D-NEXT: ll.w $a3, $a0, 0
; LA64D-NEXT: bne $a3, $a2, .LBB35_5
; LA64D-NEXT: # %bb.4: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB35_3 Depth=2
; LA64D-NEXT: move $a4, $a1
; LA64D-NEXT: sc.w $a4, $a0, 0
; LA64D-NEXT: beqz $a4, .LBB35_3
; LA64D-NEXT: b .LBB35_6
; LA64D-NEXT: .LBB35_5: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB35_1 Depth=1
; LA64D-NEXT: dbar 1792
; LA64D-NEXT: .LBB35_6: # %atomicrmw.start
; LA64D-NEXT: # in Loop: Header=BB35_1 Depth=1
; LA64D-NEXT: movgr2fr.w $fa0, $a3
; LA64D-NEXT: bne $a3, $a2, .LBB35_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, float 1.0 monotonic, align 4
ret float %v
}
define double @double_fadd_monotonic(ptr %p) nounwind {
; LA64F-LABEL: double_fadd_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB36_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a4, $zero
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB36_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fadd_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB36_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a4, $zero
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB36_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fadd ptr %p, double 1.0 monotonic, align 4
ret double %v
}
define double @double_fsub_monotonic(ptr %p) nounwind {
; LA64F-LABEL: double_fsub_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, -1025
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB37_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(__adddf3)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a4, $zero
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB37_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fsub_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: pcalau12i $a0, %pc_hi20(.LCPI37_0)
; LA64D-NEXT: fld.d $fs0, $a0, %pc_lo12(.LCPI37_0)
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB37_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fadd.d $fa1, $fa0, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a4, $zero
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB37_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fsub ptr %p, double 1.0 monotonic, align 4
ret double %v
}
define double @double_fmin_monotonic(ptr %p) nounwind {
; LA64F-LABEL: double_fmin_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB38_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmin)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a4, $zero
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB38_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmin_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB38_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmin.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a4, $zero
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB38_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmin ptr %p, double 1.0 monotonic, align 4
ret double %v
}
define double @double_fmax_monotonic(ptr %p) nounwind {
; LA64F-LABEL: double_fmax_monotonic:
; LA64F: # %bb.0:
; LA64F-NEXT: addi.d $sp, $sp, -48
; LA64F-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64F-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s0, $sp, 24 # 8-byte Folded Spill
; LA64F-NEXT: st.d $s1, $sp, 16 # 8-byte Folded Spill
; LA64F-NEXT: move $fp, $a0
; LA64F-NEXT: ld.d $s1, $a0, 0
; LA64F-NEXT: lu52i.d $s0, $zero, 1023
; LA64F-NEXT: .p2align 4, , 16
; LA64F-NEXT: .LBB39_1: # %atomicrmw.start
; LA64F-NEXT: # =>This Inner Loop Header: Depth=1
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: move $a1, $s0
; LA64F-NEXT: bl %plt(fmax)
; LA64F-NEXT: st.d $s1, $sp, 8
; LA64F-NEXT: st.d $a0, $sp, 0
; LA64F-NEXT: ori $a0, $zero, 8
; LA64F-NEXT: addi.d $a2, $sp, 8
; LA64F-NEXT: addi.d $a3, $sp, 0
; LA64F-NEXT: move $a1, $fp
; LA64F-NEXT: move $a4, $zero
; LA64F-NEXT: move $a5, $zero
; LA64F-NEXT: bl %plt(__atomic_compare_exchange)
; LA64F-NEXT: ld.d $s1, $sp, 8
; LA64F-NEXT: beqz $a0, .LBB39_1
; LA64F-NEXT: # %bb.2: # %atomicrmw.end
; LA64F-NEXT: move $a0, $s1
; LA64F-NEXT: ld.d $s1, $sp, 16 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $s0, $sp, 24 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64F-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64F-NEXT: addi.d $sp, $sp, 48
; LA64F-NEXT: ret
;
; LA64D-LABEL: double_fmax_monotonic:
; LA64D: # %bb.0:
; LA64D-NEXT: addi.d $sp, $sp, -48
; LA64D-NEXT: st.d $ra, $sp, 40 # 8-byte Folded Spill
; LA64D-NEXT: st.d $fp, $sp, 32 # 8-byte Folded Spill
; LA64D-NEXT: fst.d $fs0, $sp, 24 # 8-byte Folded Spill
; LA64D-NEXT: move $fp, $a0
; LA64D-NEXT: fld.d $fa0, $a0, 0
; LA64D-NEXT: addi.d $a0, $zero, 1
; LA64D-NEXT: movgr2fr.d $fa1, $a0
; LA64D-NEXT: ffint.d.l $fs0, $fa1
; LA64D-NEXT: .p2align 4, , 16
; LA64D-NEXT: .LBB39_1: # %atomicrmw.start
; LA64D-NEXT: # =>This Inner Loop Header: Depth=1
; LA64D-NEXT: fmax.d $fa1, $fa0, $fa0
; LA64D-NEXT: fmax.d $fa1, $fa1, $fs0
; LA64D-NEXT: fst.d $fa0, $sp, 16
; LA64D-NEXT: fst.d $fa1, $sp, 8
; LA64D-NEXT: ori $a0, $zero, 8
; LA64D-NEXT: addi.d $a2, $sp, 16
; LA64D-NEXT: addi.d $a3, $sp, 8
; LA64D-NEXT: move $a1, $fp
; LA64D-NEXT: move $a4, $zero
; LA64D-NEXT: move $a5, $zero
; LA64D-NEXT: bl %plt(__atomic_compare_exchange)
; LA64D-NEXT: fld.d $fa0, $sp, 16
; LA64D-NEXT: beqz $a0, .LBB39_1
; LA64D-NEXT: # %bb.2: # %atomicrmw.end
; LA64D-NEXT: fld.d $fs0, $sp, 24 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $fp, $sp, 32 # 8-byte Folded Reload
; LA64D-NEXT: ld.d $ra, $sp, 40 # 8-byte Folded Reload
; LA64D-NEXT: addi.d $sp, $sp, 48
; LA64D-NEXT: ret
%v = atomicrmw fmax ptr %p, double 1.0 monotonic, align 4
ret double %v
}