; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mcpu=pwr9 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:    -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P9LE
; RUN: llc -mcpu=pwr9 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:    -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P9BE
; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:    -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8LE
; RUN: llc -mcpu=pwr8 -verify-machineinstrs -ppc-asm-full-reg-names \
; RUN:    -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefix=P8BE
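
; The ppc_fp128 add in the loop below is lowered to a call to the runtime
; routine __gcc_qadd. A call inside the loop body prevents forming a CTR-based
; hardware loop (mtctr/bdnz), so on all four configurations the checks expect
; a decrementing GPR counter driven by addi/cmpldi/bc instead.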

declare ppc_fp128 @llvm.fmuladd.ppcf128(ppc_fp128, ppc_fp128, ppc_fp128)
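
; There is no native ppc_fp128 fmuladd instruction; with both multiplicands
; zero the multiply folds away, leaving a single IBM long double add, which is
; why only __gcc_qadd calls (operands zeroed via xxlxor) appear in the output.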

define ppc_fp128 @test_ctr0() {
; P9LE-LABEL: test_ctr0:
; P9LE:       # %bb.0: # %bb
; P9LE-NEXT:    mflr r0
; P9LE-NEXT:    .cfi_def_cfa_offset 48
; P9LE-NEXT:    .cfi_offset lr, 16
; P9LE-NEXT:    .cfi_offset r30, -16
; P9LE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; P9LE-NEXT:    stdu r1, -48(r1)
; P9LE-NEXT:    li r3, 1
; P9LE-NEXT:    xxlxor f1, f1, f1
; P9LE-NEXT:    xxlxor f2, f2, f2
; P9LE-NEXT:    std r0, 64(r1)
; P9LE-NEXT:    rldic r30, r3, 62, 1
; P9LE-NEXT:    .p2align 5
; P9LE-NEXT:  .LBB0_1: # %bb6
; P9LE-NEXT:    #
; P9LE-NEXT:    xxlxor f3, f3, f3
; P9LE-NEXT:    xxlxor f4, f4, f4
; P9LE-NEXT:    bl __gcc_qadd
; P9LE-NEXT:    nop
; P9LE-NEXT:    addi r30, r30, -1
; P9LE-NEXT:    cmpldi r30, 0
; P9LE-NEXT:    bc 12, gt, .LBB0_1
; P9LE-NEXT:  # %bb.2: # %bb14
; P9LE-NEXT:    addi r1, r1, 48
; P9LE-NEXT:    ld r0, 16(r1)
; P9LE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; P9LE-NEXT:    mtlr r0
; P9LE-NEXT:    blr
;
; P9BE-LABEL: test_ctr0:
; P9BE:       # %bb.0: # %bb
; P9BE-NEXT:    mflr r0
; P9BE-NEXT:    stdu r1, -128(r1)
; P9BE-NEXT:    std r0, 144(r1)
; P9BE-NEXT:    .cfi_def_cfa_offset 128
; P9BE-NEXT:    .cfi_offset lr, 16
; P9BE-NEXT:    .cfi_offset r30, -16
; P9BE-NEXT:    li r3, 1
; P9BE-NEXT:    std r30, 112(r1) # 8-byte Folded Spill
; P9BE-NEXT:    xxlxor f1, f1, f1
; P9BE-NEXT:    rldic r30, r3, 62, 1
; P9BE-NEXT:    xxlxor f2, f2, f2
; P9BE-NEXT:    .p2align 5
; P9BE-NEXT:  .LBB0_1: # %bb6
; P9BE-NEXT:    #
; P9BE-NEXT:    xxlxor f3, f3, f3
; P9BE-NEXT:    xxlxor f4, f4, f4
; P9BE-NEXT:    bl __gcc_qadd
; P9BE-NEXT:    nop
; P9BE-NEXT:    addi r30, r30, -1
; P9BE-NEXT:    cmpldi r30, 0
; P9BE-NEXT:    bc 12, gt, .LBB0_1
; P9BE-NEXT:  # %bb.2: # %bb14
; P9BE-NEXT:    ld r30, 112(r1) # 8-byte Folded Reload
; P9BE-NEXT:    addi r1, r1, 128
; P9BE-NEXT:    ld r0, 16(r1)
; P9BE-NEXT:    mtlr r0
; P9BE-NEXT:    blr
;
; P8LE-LABEL: test_ctr0:
; P8LE:       # %bb.0: # %bb
; P8LE-NEXT:    mflr r0
; P8LE-NEXT:    .cfi_def_cfa_offset 48
; P8LE-NEXT:    .cfi_offset lr, 16
; P8LE-NEXT:    .cfi_offset r30, -16
; P8LE-NEXT:    std r30, -16(r1) # 8-byte Folded Spill
; P8LE-NEXT:    stdu r1, -48(r1)
; P8LE-NEXT:    li r3, 1
; P8LE-NEXT:    xxlxor f1, f1, f1
; P8LE-NEXT:    xxlxor f2, f2, f2
; P8LE-NEXT:    std r0, 64(r1)
; P8LE-NEXT:    rldic r30, r3, 62, 1
; P8LE-NEXT:    .p2align 5
; P8LE-NEXT:  .LBB0_1: # %bb6
; P8LE-NEXT:    #
; P8LE-NEXT:    xxlxor f3, f3, f3
; P8LE-NEXT:    xxlxor f4, f4, f4
; P8LE-NEXT:    bl __gcc_qadd
; P8LE-NEXT:    nop
; P8LE-NEXT:    addi r30, r30, -1
; P8LE-NEXT:    cmpldi r30, 0
; P8LE-NEXT:    bc 12, gt, .LBB0_1
; P8LE-NEXT:  # %bb.2: # %bb14
; P8LE-NEXT:    addi r1, r1, 48
; P8LE-NEXT:    ld r0, 16(r1)
; P8LE-NEXT:    ld r30, -16(r1) # 8-byte Folded Reload
; P8LE-NEXT:    mtlr r0
; P8LE-NEXT:    blr
;
; P8BE-LABEL: test_ctr0:
; P8BE:       # %bb.0: # %bb
; P8BE-NEXT:    mflr r0
; P8BE-NEXT:    stdu r1, -128(r1)
; P8BE-NEXT:    std r0, 144(r1)
; P8BE-NEXT:    .cfi_def_cfa_offset 128
; P8BE-NEXT:    .cfi_offset lr, 16
; P8BE-NEXT:    .cfi_offset r30, -16
; P8BE-NEXT:    li r3, 1
; P8BE-NEXT:    std r30, 112(r1) # 8-byte Folded Spill
; P8BE-NEXT:    xxlxor f1, f1, f1
; P8BE-NEXT:    xxlxor f2, f2, f2
; P8BE-NEXT:    rldic r30, r3, 62, 1
; P8BE-NEXT:    .p2align 5
; P8BE-NEXT:  .LBB0_1: # %bb6
; P8BE-NEXT:    #
; P8BE-NEXT:    xxlxor f3, f3, f3
; P8BE-NEXT:    xxlxor f4, f4, f4
; P8BE-NEXT:    bl __gcc_qadd
; P8BE-NEXT:    nop
; P8BE-NEXT:    addi r30, r30, -1
; P8BE-NEXT:    cmpldi r30, 0
; P8BE-NEXT:    bc 12, gt, .LBB0_1
; P8BE-NEXT:  # %bb.2: # %bb14
; P8BE-NEXT:    ld r30, 112(r1) # 8-byte Folded Reload
; P8BE-NEXT:    addi r1, r1, 128
; P8BE-NEXT:    ld r0, 16(r1)
; P8BE-NEXT:    mtlr r0
; P8BE-NEXT:    blr
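; The i64 counter %i7 starts at 0 and steps by -4, so %i9 first wraps back to
; 0 after 2^64 / 4 = 2^62 iterations; that trip count is what the prologue
; materializes with "rldic r30, r3, 62, 1" (i.e. 1 << 62).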
bb:
  br label %bb6

bb6:                                              ; preds = %bb6, %bb
  %i = phi ppc_fp128 [ %i8, %bb6 ], [ 0xM00000000000000000000000000000000, %bb ]
  %i7 = phi i64 [ %i9, %bb6 ], [ 0, %bb ]
  %i8 = tail call ppc_fp128 @llvm.fmuladd.ppcf128(ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 0xM00000000000000000000000000000000, ppc_fp128 %i)
  %i9 = add i64 %i7, -4
  %i10 = icmp eq i64 %i9, 0
  br i1 %i10, label %bb14, label %bb6

bb14:                                             ; preds = %bb6
  ret ppc_fp128 %i8
}