; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec \
; RUN: -mtriple powerpc-ibm-aix-xcoff < %s | \
; RUN: FileCheck --check-prefixes=CHECKASM,ASM32PWR4 %s
; RUN: llc -verify-machineinstrs -mcpu=pwr4 -mattr=-altivec \
; RUN: -mtriple powerpc64-ibm-aix-xcoff < %s | \
; RUN: FileCheck --check-prefixes=CHECKASM,ASM64PWR4 %s
; Four signext i8 constants are each materialized into the first four GPR
; argument registers (r3-r6) on both 32-bit and 64-bit AIX.
define void @call_test_chars() {
; ASM32PWR4-LABEL: call_test_chars:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: li 3, 97
; ASM32PWR4-NEXT: li 4, 97
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: li 5, 97
; ASM32PWR4-NEXT: li 6, 97
; ASM32PWR4-NEXT: bl .test_chars
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_chars:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -112(1)
; ASM64PWR4-NEXT: li 3, 97
; ASM64PWR4-NEXT: li 4, 97
; ASM64PWR4-NEXT: std 0, 128(1)
; ASM64PWR4-NEXT: li 5, 97
; ASM64PWR4-NEXT: li 6, 97
; ASM64PWR4-NEXT: bl .test_chars
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 112
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  call i8 @test_chars(i8 signext 97, i8 signext 97, i8 signext 97, i8 signext 97)
  ret void
}
; Callee side: sums four signext i8 args received in r3-r6; the final extsb
; re-extends the truncated sum to satisfy the signext return attribute.
define signext i8 @test_chars(i8 signext %c1, i8 signext %c2, i8 signext %c3, i8 signext %c4) {
; CHECKASM-LABEL: test_chars:
; CHECKASM: # %bb.0: # %entry
; CHECKASM-NEXT: add 3, 3, 4
; CHECKASM-NEXT: add 3, 3, 5
; CHECKASM-NEXT: add 3, 3, 6
; CHECKASM-NEXT: extsb 3, 3
; CHECKASM-NEXT: blr
entry:
  %conv = sext i8 %c1 to i32
  %conv1 = sext i8 %c2 to i32
  %add = add nsw i32 %conv, %conv1
  %add3 = add nsw i32 %add, %conv2
  %conv2 = sext i8 %c3 to i32
  %add3 = add nsw i32 %add, %conv2
  %conv4 = sext i8 %c4 to i32
  %add5 = add nsw i32 %add3, %conv4
  %conv6 = trunc i32 %add5 to i8
  ret i8 %conv6
}
; Mixed extension attributes on i8 constants: zeroext -31 is materialized as
; 225 (r4) while signext -31 stays -31 (r6).
define void @call_test_chars_mix() {
; ASM32PWR4-LABEL: call_test_chars_mix:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: li 3, 97
; ASM32PWR4-NEXT: li 4, 225
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: li 5, 97
; ASM32PWR4-NEXT: li 6, -31
; ASM32PWR4-NEXT: bl .test_chars_mix
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_chars_mix:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -112(1)
; ASM64PWR4-NEXT: li 3, 97
; ASM64PWR4-NEXT: li 4, 225
; ASM64PWR4-NEXT: std 0, 128(1)
; ASM64PWR4-NEXT: li 5, 97
; ASM64PWR4-NEXT: li 6, -31
; ASM64PWR4-NEXT: bl .test_chars_mix
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 112
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  call i8 @test_chars_mix(i8 signext 97, i8 zeroext -31, i8 zeroext 97, i8 signext -31)
  ret void
}
; Callee side of the mixed-extension test: same add chain as @test_chars;
; the caller-side extensions mean no masking is needed in the body.
define signext i8 @test_chars_mix(i8 signext %c1, i8 zeroext %c2, i8 zeroext %c3, i8 signext %c4) {
; CHECKASM-LABEL: test_chars_mix:
; CHECKASM: # %bb.0: # %entry
; CHECKASM-NEXT: add 3, 3, 4
; CHECKASM-NEXT: add 3, 3, 5
; CHECKASM-NEXT: add 3, 3, 6
; CHECKASM-NEXT: extsb 3, 3
; CHECKASM-NEXT: blr
entry:
  %conv = sext i8 %c1 to i32
  %conv1 = zext i8 %c2 to i32
  %add = add nsw i32 %conv, %conv1
  %conv2 = zext i8 %c3 to i32
  %add3 = add nsw i32 %add, %conv2
  %conv4 = sext i8 %c4 to i32
  %add5 = add nsw i32 %add3, %conv4
  %conv6 = trunc i32 %add5 to i8
  ret i8 %conv6
}
@global_i1 = global i8 0, align 1
; An i1 argument without an extension attribute must be masked to its low bit
; (clrlwi 3, 3, 31) in the callee before being stored as an i8.
define void @test_i1(i1 %b) {
; ASM32PWR4-LABEL: test_i1:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: lwz 4, L..C0(2) # @global_i1
; ASM32PWR4-NEXT: clrlwi 3, 3, 31
; ASM32PWR4-NEXT: stb 3, 0(4)
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_i1:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: ld 4, L..C0(2) # @global_i1
; ASM64PWR4-NEXT: clrlwi 3, 3, 31
; ASM64PWR4-NEXT: stb 3, 0(4)
; ASM64PWR4-NEXT: blr
entry:
  %frombool = zext i1 %b to i8
  store i8 %frombool, ptr @global_i1, align 1
  ret void
}
; A constant i1 true argument is passed as the immediate 1 in r3.
define void @call_test_i1() {
; ASM32PWR4-LABEL: call_test_i1:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: bl .test_i1
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_i1:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -112(1)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: std 0, 128(1)
; ASM64PWR4-NEXT: bl .test_i1
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 112
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  call void @test_i1(i1 1)
  ret void
}
; With zeroext on the i1 parameter the callee may trust the caller's
; extension: no clrlwi mask is emitted before the store (contrast @test_i1).
define void @test_i1zext(i1 zeroext %b) {
; ASM32PWR4-LABEL: test_i1zext:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: lwz 4, L..C0(2) # @global_i1
; ASM32PWR4-NEXT: stb 3, 0(4)
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_i1zext:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: ld 4, L..C0(2) # @global_i1
; ASM64PWR4-NEXT: stb 3, 0(4)
; ASM64PWR4-NEXT: blr
entry:
  %frombool = zext i1 %b to i8
  store i8 %frombool, ptr @global_i1, align 1
  ret void
}
; Eight i32 arguments exactly fill the GPR argument registers r3-r10;
; the callee sums them with a plain add chain.
define i32 @test_ints(i32 signext %a, i32 zeroext %b, i32 zeroext %c, i32 signext %d, i32 signext %e, i32 signext %f, i32 signext %g, i32 signext %h) {
; CHECKASM-LABEL: test_ints:
; CHECKASM: # %bb.0: # %entry
; CHECKASM-NEXT: add 3, 3, 4
; CHECKASM-NEXT: add 3, 3, 5
; CHECKASM-NEXT: add 3, 3, 6
; CHECKASM-NEXT: add 3, 3, 7
; CHECKASM-NEXT: add 3, 3, 8
; CHECKASM-NEXT: add 3, 3, 9
; CHECKASM-NEXT: add 3, 3, 10
; CHECKASM-NEXT: blr
entry:
  %add = add i32 %a, %b
  %add1 = add i32 %add, %c
  %add2 = add i32 %add1, %d
  %add3 = add i32 %add2, %e
  %add4 = add i32 %add3, %f
  %add5 = add i32 %add4, %g
  %add6 = add i32 %add5, %h
  ret i32 %add6
}
; Materialization of 0x80000000: zeroext 2147483648 and signext -2147483648
; use lis on 32-bit; on 64-bit the zeroext value is built with rldic so the
; high 32 bits stay clear, while the signext one keeps lis sign-extension.
define void @call_test_ints() {
; ASM32PWR4-LABEL: call_test_ints:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: li 4, 1
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: lis 5, -32768
; ASM32PWR4-NEXT: lis 6, -32768
; ASM32PWR4-NEXT: li 7, 1
; ASM32PWR4-NEXT: li 8, 1
; ASM32PWR4-NEXT: li 9, 1
; ASM32PWR4-NEXT: li 10, 1
; ASM32PWR4-NEXT: bl .test_ints
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_ints:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -112(1)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: li 4, 1
; ASM64PWR4-NEXT: std 0, 128(1)
; ASM64PWR4-NEXT: rldic 5, 3, 31, 32
; ASM64PWR4-NEXT: lis 6, -32768
; ASM64PWR4-NEXT: li 7, 1
; ASM64PWR4-NEXT: li 8, 1
; ASM64PWR4-NEXT: li 9, 1
; ASM64PWR4-NEXT: li 10, 1
; ASM64PWR4-NEXT: bl .test_ints
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 112
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  call i32 @test_ints(i32 signext 1, i32 zeroext 1, i32 zeroext 2147483648, i32 signext -2147483648, i32 signext 1, i32 signext 1, i32 signext 1, i32 signext 1)
  ret void
}
; i64 arguments: on 32-bit AIX each i64 is split across a GPR pair
; (r3/r4, r5/r6, r7/r8, r9/r10); on 64-bit each fits one GPR (r3-r6).
define void @call_test_i64() {
; ASM32PWR4-LABEL: call_test_i64:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: li 3, 0
; ASM32PWR4-NEXT: li 4, 1
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: li 5, 0
; ASM32PWR4-NEXT: li 6, 2
; ASM32PWR4-NEXT: li 7, 0
; ASM32PWR4-NEXT: li 8, 3
; ASM32PWR4-NEXT: li 9, 0
; ASM32PWR4-NEXT: li 10, 4
; ASM32PWR4-NEXT: bl .test_i64
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_i64:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -112(1)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: std 0, 128(1)
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: bl .test_i64
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 112
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  call i64 @test_i64(i64 1, i64 2, i64 3, i64 4)
  ret void
}
; Callee side of the i64 test: 32-bit sums the register pairs with
; addc/adde carry chains; 64-bit needs only plain adds.
define i64 @test_i64(i64 %a, i64 %b, i64 %c, i64 %d) {
; ASM32PWR4-LABEL: test_i64:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: addc 4, 4, 6
; ASM32PWR4-NEXT: adde 3, 3, 5
; ASM32PWR4-NEXT: addc 4, 4, 8
; ASM32PWR4-NEXT: adde 3, 3, 7
; ASM32PWR4-NEXT: addc 4, 4, 10
; ASM32PWR4-NEXT: adde 3, 3, 9
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_i64:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: add 3, 3, 6
; ASM64PWR4-NEXT: blr
entry:
  %add = add nsw i64 %a, %b
  %add1 = add nsw i64 %add, %c
  %add2 = add nsw i64 %add1, %d
  ret i64 %add2
}
; A pointer argument: the caller stores 0 into a stack slot and passes the
; slot's address in r3.
define void @call_test_int_ptr() {
; ASM32PWR4-LABEL: call_test_int_ptr:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: li 3, 0
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: stw 3, 60(1)
; ASM32PWR4-NEXT: addi 3, 1, 60
; ASM32PWR4-NEXT: bl .test_int_ptr
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_int_ptr:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: li 3, 0
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: stw 3, 124(1)
; ASM64PWR4-NEXT: addi 3, 1, 124
; ASM64PWR4-NEXT: bl .test_int_ptr
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %b = alloca i32, align 4
  store i32 0, ptr %b, align 4
  call void @test_int_ptr(ptr %b)
  ret void
}
; Callee receives the pointer in r3 and spills it to a local slot at a
; negative SP offset (pointer-sized store: stw on 32-bit, std on 64-bit).
define void @test_int_ptr(ptr %a) {
; ASM32PWR4-LABEL: test_int_ptr:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: stw 3, -8(1)
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_int_ptr:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: std 3, -8(1)
; ASM64PWR4-NEXT: blr
entry:
  %a.addr = alloca ptr, align 8
  store ptr %a, ptr %a.addr, align 8
  ret void
}
; Converts an i32 to an i1 (icmp ne 0, lowered via cntlzw sequences that
; differ between 32- and 64-bit) and forwards it as a zeroext i1 argument
; to the external @call_test_bool.
define i32 @caller(i32 %i) {
; ASM32PWR4-LABEL: caller:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: stw 3, 60(1)
; ASM32PWR4-NEXT: cntlzw 3, 3
; ASM32PWR4-NEXT: not 3, 3
; ASM32PWR4-NEXT: rlwinm 3, 3, 27, 31, 31
; ASM32PWR4-NEXT: stb 3, 59(1)
; ASM32PWR4-NEXT: bl .call_test_bool[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: caller:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: stw 3, 124(1)
; ASM64PWR4-NEXT: cntlzw 3, 3
; ASM64PWR4-NEXT: srwi 3, 3, 5
; ASM64PWR4-NEXT: xori 3, 3, 1
; ASM64PWR4-NEXT: stb 3, 123(1)
; ASM64PWR4-NEXT: bl .call_test_bool[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %i.addr = alloca i32, align 4
  %b = alloca i8, align 1
  store i32 %i, ptr %i.addr, align 4
  %0 = load i32, ptr %i.addr, align 4
  %cmp = icmp ne i32 %0, 0
  %frombool = zext i1 %cmp to i8
  store i8 %frombool, ptr %b, align 1
  %1 = load i8, ptr %b, align 1
  %tobool = trunc i8 %1 to i1
  %call = call i32 @call_test_bool(i1 zeroext %tobool)
  ret i32 %call
}
; External callee used by @caller.
declare i32 @call_test_bool(i1 zeroext)
; Float/double globals loaded as argument values by the FP-passing tests.
@f1 = global float 0.000000e+00, align 4
@d1 = global double 0.000000e+00, align 8
; Three float arguments go in FPRs f1-f3; one load plus two fmr copies.
define void @call_test_floats() {
; ASM32PWR4-LABEL: call_test_floats:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: lfs 1, 0(3)
; ASM32PWR4-NEXT: fmr 2, 1
; ASM32PWR4-NEXT: fmr 3, 1
; ASM32PWR4-NEXT: bl .test_floats
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_floats:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -112(1)
; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1
; ASM64PWR4-NEXT: std 0, 128(1)
; ASM64PWR4-NEXT: lfs 1, 0(3)
; ASM64PWR4-NEXT: fmr 2, 1
; ASM64PWR4-NEXT: fmr 3, 1
; ASM64PWR4-NEXT: bl .test_floats
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 112
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f1, align 4
  call float @test_floats(float %0, float %0, float %0)
  ret void
}
; Callee sums three float args from f1-f3 with single-precision fadds.
define float @test_floats(float %f1, float %f2, float %f3) {
; CHECKASM-LABEL: test_floats:
; CHECKASM: # %bb.0: # %entry
; CHECKASM-NEXT: fadds 0, 1, 2
; CHECKASM-NEXT: fadds 1, 0, 3
; CHECKASM-NEXT: blr
entry:
  %add = fadd float %f1, %f2
  %add1 = fadd float %add, %f3
  ret float %add1
}
; Thirteen double arguments fill all argument FPRs f1-f13; copies of the
; arguments beyond the register-covered slots are also written to the
; parameter save area with stfd (more slots on 32-bit than on 64-bit).
define void @call_test_fpr_max() {
; ASM32PWR4-LABEL: call_test_fpr_max:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -128(1)
; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1
; ASM32PWR4-NEXT: stw 0, 136(1)
; ASM32PWR4-NEXT: lfd 1, 0(3)
; ASM32PWR4-NEXT: fmr 2, 1
; ASM32PWR4-NEXT: fmr 3, 1
; ASM32PWR4-NEXT: stfd 1, 120(1)
; ASM32PWR4-NEXT: stfd 1, 112(1)
; ASM32PWR4-NEXT: fmr 4, 1
; ASM32PWR4-NEXT: fmr 5, 1
; ASM32PWR4-NEXT: stfd 1, 104(1)
; ASM32PWR4-NEXT: fmr 6, 1
; ASM32PWR4-NEXT: fmr 7, 1
; ASM32PWR4-NEXT: stfd 1, 96(1)
; ASM32PWR4-NEXT: stfd 1, 88(1)
; ASM32PWR4-NEXT: fmr 8, 1
; ASM32PWR4-NEXT: fmr 9, 1
; ASM32PWR4-NEXT: stfd 1, 80(1)
; ASM32PWR4-NEXT: fmr 10, 1
; ASM32PWR4-NEXT: fmr 11, 1
; ASM32PWR4-NEXT: stfd 1, 72(1)
; ASM32PWR4-NEXT: stfd 1, 64(1)
; ASM32PWR4-NEXT: fmr 12, 1
; ASM32PWR4-NEXT: fmr 13, 1
; ASM32PWR4-NEXT: stfd 1, 56(1)
; ASM32PWR4-NEXT: bl .test_fpr_max
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 128
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_fpr_max:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -160(1)
; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1
; ASM64PWR4-NEXT: std 0, 176(1)
; ASM64PWR4-NEXT: lfd 1, 0(3)
; ASM64PWR4-NEXT: fmr 2, 1
; ASM64PWR4-NEXT: fmr 3, 1
; ASM64PWR4-NEXT: stfd 1, 144(1)
; ASM64PWR4-NEXT: stfd 1, 136(1)
; ASM64PWR4-NEXT: fmr 4, 1
; ASM64PWR4-NEXT: fmr 5, 1
; ASM64PWR4-NEXT: stfd 1, 128(1)
; ASM64PWR4-NEXT: fmr 6, 1
; ASM64PWR4-NEXT: fmr 7, 1
; ASM64PWR4-NEXT: stfd 1, 120(1)
; ASM64PWR4-NEXT: stfd 1, 112(1)
; ASM64PWR4-NEXT: fmr 8, 1
; ASM64PWR4-NEXT: fmr 9, 1
; ASM64PWR4-NEXT: fmr 10, 1
; ASM64PWR4-NEXT: fmr 11, 1
; ASM64PWR4-NEXT: fmr 12, 1
; ASM64PWR4-NEXT: fmr 13, 1
; ASM64PWR4-NEXT: bl .test_fpr_max
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 160
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load double, ptr @d1, align 8
  call double @test_fpr_max(double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0, double %0)
  ret void
}
; Callee sums thirteen double args received in f1-f13 with a fadd chain.
define double @test_fpr_max(double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, double %d9, double %d10, double %d11, double %d12, double %d13) {
; CHECKASM-LABEL: test_fpr_max:
; CHECKASM: # %bb.0: # %entry
; CHECKASM-NEXT: fadd 0, 1, 2
; CHECKASM-NEXT: fadd 0, 0, 3
; CHECKASM-NEXT: fadd 0, 0, 4
; CHECKASM-NEXT: fadd 0, 0, 5
; CHECKASM-NEXT: fadd 0, 0, 6
; CHECKASM-NEXT: fadd 0, 0, 7
; CHECKASM-NEXT: fadd 0, 0, 8
; CHECKASM-NEXT: fadd 0, 0, 9
; CHECKASM-NEXT: fadd 0, 0, 10
; CHECKASM-NEXT: fadd 0, 0, 11
; CHECKASM-NEXT: fadd 0, 0, 12
; CHECKASM-NEXT: fadd 1, 0, 13
; CHECKASM-NEXT: blr
entry:
  %add = fadd double %d1, %d2
  %add1 = fadd double %add, %d3
  %add2 = fadd double %add1, %d4
  %add3 = fadd double %add2, %d5
  %add4 = fadd double %add3, %d6
  %add5 = fadd double %add4, %d7
  %add6 = fadd double %add5, %d8
  %add7 = fadd double %add6, %d9
  %add8 = fadd double %add7, %d10
  %add9 = fadd double %add8, %d11
  %add10 = fadd double %add9, %d12
  %add11 = fadd double %add10, %d13
  ret double %add11
}
; Mixed FP and integer args: float in f1, i32 in r4, double in f2, and the
; trailing i8 lands in r7 on 32-bit (the double shadows two GPRs) but in r6
; on 64-bit (the double shadows only one).
define void @call_test_mix() {
; ASM32PWR4-LABEL: call_test_mix:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: li 4, 1
; ASM32PWR4-NEXT: li 7, 97
; ASM32PWR4-NEXT: lfs 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: bl .test_mix
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_mix:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -112(1)
; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1
; ASM64PWR4-NEXT: std 0, 128(1)
; ASM64PWR4-NEXT: li 4, 1
; ASM64PWR4-NEXT: li 6, 97
; ASM64PWR4-NEXT: lfs 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: bl .test_mix
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 112
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f1, align 4
  %1 = load double, ptr @d1, align 8
  call i32 @test_mix(float %0, i32 1, double %1, i8 signext 97)
  ret void
}
; Callee mixes FP and int math; the sitofp lowering differs: 32-bit uses the
; magic-bias constant trick (lis 17200 + constant-pool subtract), 64-bit uses
; fcfid directly. The i8 arrives in r7 (32-bit) / r6 (64-bit), matching the
; caller above.
define i32 @test_mix(float %f, i32 signext %i, double %d, i8 signext %c) {
; ASM32PWR4-LABEL: test_mix:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: lis 3, 17200
; ASM32PWR4-NEXT: fadd 1, 1, 2
; ASM32PWR4-NEXT: stw 3, -16(1)
; ASM32PWR4-NEXT: lwz 3, L..C3(2) # %const.0
; ASM32PWR4-NEXT: frsp 1, 1
; ASM32PWR4-NEXT: lfs 0, 0(3)
; ASM32PWR4-NEXT: clrlwi 3, 7, 24
; ASM32PWR4-NEXT: add 3, 4, 3
; ASM32PWR4-NEXT: xoris 3, 3, 32768
; ASM32PWR4-NEXT: stw 3, -12(1)
; ASM32PWR4-NEXT: addi 3, 1, -4
; ASM32PWR4-NEXT: lfd 2, -16(1)
; ASM32PWR4-NEXT: fsub 0, 2, 0
; ASM32PWR4-NEXT: frsp 0, 0
; ASM32PWR4-NEXT: fadds 0, 0, 1
; ASM32PWR4-NEXT: fctiwz 0, 0
; ASM32PWR4-NEXT: stfiwx 0, 0, 3
; ASM32PWR4-NEXT: lwz 3, -4(1)
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_mix:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: clrlwi 5, 6, 24
; ASM64PWR4-NEXT: fadd 0, 1, 2
; ASM64PWR4-NEXT: addi 3, 1, -4
; ASM64PWR4-NEXT: frsp 0, 0
; ASM64PWR4-NEXT: add 4, 4, 5
; ASM64PWR4-NEXT: extsw 4, 4
; ASM64PWR4-NEXT: std 4, -16(1)
; ASM64PWR4-NEXT: lfd 1, -16(1)
; ASM64PWR4-NEXT: fcfid 1, 1
; ASM64PWR4-NEXT: frsp 1, 1
; ASM64PWR4-NEXT: fadds 0, 1, 0
; ASM64PWR4-NEXT: fctiwz 0, 0
; ASM64PWR4-NEXT: stfiwx 0, 0, 3
; ASM64PWR4-NEXT: lwz 3, -4(1)
; ASM64PWR4-NEXT: blr
entry:
  %conv = fpext float %f to double
  %add = fadd double %conv, %d
  %conv1 = fptrunc double %add to float
  %conv2 = zext i8 %c to i32
  %add3 = add nsw i32 %i, %conv2
  %conv4 = sitofp i32 %add3 to float
  %add5 = fadd float %conv4, %conv1
  %conv6 = fptosi float %add5 to i32
  ret i32 %conv6
}
; Mixed-width integer args ending in an i64: on 32-bit the i64 result is
; built from the r7/r8 pair with srawi + addc/adde; on 64-bit a single
; extsw + add suffices. The unattributed i8 in r4 is masked with clrlwi.
define i64 @callee_mixed_ints(i32 %a, i8 signext %b, i32 %c, i16 signext %d, i64 %e) {
; ASM32PWR4-LABEL: callee_mixed_ints:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: clrlwi 4, 4, 24
; ASM32PWR4-NEXT: add 3, 3, 4
; ASM32PWR4-NEXT: add 3, 3, 5
; ASM32PWR4-NEXT: add 3, 3, 6
; ASM32PWR4-NEXT: srawi 5, 3, 31
; ASM32PWR4-NEXT: addc 4, 3, 8
; ASM32PWR4-NEXT: adde 3, 5, 7
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: callee_mixed_ints:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: clrlwi 4, 4, 24
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: add 3, 3, 6
; ASM64PWR4-NEXT: extsw 3, 3
; ASM64PWR4-NEXT: add 3, 3, 7
; ASM64PWR4-NEXT: blr
entry:
  %conv = zext i8 %b to i32
  %add = add nsw i32 %a, %conv
  %add1 = add nsw i32 %add, %c
  %conv2 = sext i16 %d to i32
  %add3 = add nsw i32 %add1, %conv2
  %conv4 = sext i32 %add3 to i64
  %add5 = add nsw i64 %conv4, %e
  ret i64 %add5
}
; Variadic call with double args: each double is stored to the stack and
; reloaded into the GPRs that shadow its parameter slot (pairs r4/r5 and
; r6/r7 on 32-bit; r4 and r5 on 64-bit).
define void @call_test_vararg() {
; ASM32PWR4-LABEL: call_test_vararg:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -80(1)
; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1
; ASM32PWR4-NEXT: stw 0, 88(1)
; ASM32PWR4-NEXT: lfs 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1
; ASM32PWR4-NEXT: stfd 1, 64(1)
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: li 3, 42
; ASM32PWR4-NEXT: stfd 2, 72(1)
; ASM32PWR4-NEXT: lwz 4, 64(1)
; ASM32PWR4-NEXT: lwz 5, 68(1)
; ASM32PWR4-NEXT: lwz 6, 72(1)
; ASM32PWR4-NEXT: lwz 7, 76(1)
; ASM32PWR4-NEXT: bl .test_vararg[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 80
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_vararg:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: lfs 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1
; ASM64PWR4-NEXT: stfd 1, 112(1)
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: li 3, 42
; ASM64PWR4-NEXT: stfd 2, 120(1)
; ASM64PWR4-NEXT: ld 4, 112(1)
; ASM64PWR4-NEXT: ld 5, 120(1)
; ASM64PWR4-NEXT: bl .test_vararg[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f1, align 4
  %conv = fpext float %0 to double
  %1 = load double, ptr @d1, align 8
  call void (i32, ...) @test_vararg(i32 42, double %conv, double %1)
  ret void
}
; External variadic callee shared by the vararg tests.
declare void @test_vararg(i32, ...)
; Like @call_test_vararg but with an i32 between the two doubles; the int
; occupies r6 (32-bit) / r5 (64-bit), pushing the second double's GPR
; shadow copies to r7/r8 (32-bit) / r6 (64-bit).
define void @call_test_vararg2() {
; ASM32PWR4-LABEL: call_test_vararg2:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -80(1)
; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1
; ASM32PWR4-NEXT: stw 0, 88(1)
; ASM32PWR4-NEXT: li 6, 42
; ASM32PWR4-NEXT: lfs 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1
; ASM32PWR4-NEXT: stfd 1, 64(1)
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: li 3, 42
; ASM32PWR4-NEXT: stfd 2, 72(1)
; ASM32PWR4-NEXT: lwz 4, 64(1)
; ASM32PWR4-NEXT: lwz 5, 68(1)
; ASM32PWR4-NEXT: lwz 7, 72(1)
; ASM32PWR4-NEXT: lwz 8, 76(1)
; ASM32PWR4-NEXT: bl .test_vararg[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 80
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_vararg2:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: li 5, 42
; ASM64PWR4-NEXT: lfs 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1
; ASM64PWR4-NEXT: stfd 1, 112(1)
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: li 3, 42
; ASM64PWR4-NEXT: stfd 2, 120(1)
; ASM64PWR4-NEXT: ld 4, 112(1)
; ASM64PWR4-NEXT: ld 6, 120(1)
; ASM64PWR4-NEXT: bl .test_vararg[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f1, align 4
  %conv = fpext float %0 to double
  %1 = load double, ptr @d1, align 8
  call void (i32, ...) @test_vararg(i32 42, double %conv, i32 42, double %1)
  ret void
}
; Like @call_test_vararg2 but the middle argument is an i64: on 32-bit it
; takes the r6/r7 pair (li 6, 0 / li 7, 42); on 64-bit it takes the single
; r5 and the layout matches the vararg2 case.
define void @call_test_vararg3() {
; ASM32PWR4-LABEL: call_test_vararg3:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -80(1)
; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1
; ASM32PWR4-NEXT: stw 0, 88(1)
; ASM32PWR4-NEXT: li 6, 0
; ASM32PWR4-NEXT: li 7, 42
; ASM32PWR4-NEXT: lfs 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C2(2) # @d1
; ASM32PWR4-NEXT: stfd 1, 64(1)
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: li 3, 42
; ASM32PWR4-NEXT: stfd 2, 72(1)
; ASM32PWR4-NEXT: lwz 4, 64(1)
; ASM32PWR4-NEXT: lwz 5, 68(1)
; ASM32PWR4-NEXT: lwz 8, 72(1)
; ASM32PWR4-NEXT: lwz 9, 76(1)
; ASM32PWR4-NEXT: bl .test_vararg[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 80
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_vararg3:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: li 5, 42
; ASM64PWR4-NEXT: lfs 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C2(2) # @d1
; ASM64PWR4-NEXT: stfd 1, 112(1)
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: li 3, 42
; ASM64PWR4-NEXT: stfd 2, 120(1)
; ASM64PWR4-NEXT: ld 4, 112(1)
; ASM64PWR4-NEXT: ld 6, 120(1)
; ASM64PWR4-NEXT: bl .test_vararg[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f1, align 4
  %conv = fpext float %0 to double
  %1 = load double, ptr @d1, align 8
  call void (i32, ...) @test_vararg(i32 42, double %conv, i64 42, double %1)
  ret void
}
; A single float vararg: stored with stfs and reloaded into one GPR (r4)
; alongside its FPR copy in f1.
define void @call_test_vararg4() {
; ASM32PWR4-LABEL: call_test_vararg4:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: lwz 3, L..C1(2) # @f1
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: lfs 1, 0(3)
; ASM32PWR4-NEXT: li 3, 42
; ASM32PWR4-NEXT: stfs 1, 60(1)
; ASM32PWR4-NEXT: lwz 4, 60(1)
; ASM32PWR4-NEXT: bl .test_vararg[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_vararg4:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C1(2) # @f1
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: lfs 1, 0(3)
; ASM64PWR4-NEXT: li 3, 42
; ASM64PWR4-NEXT: stfs 1, 124(1)
; ASM64PWR4-NEXT: lwz 4, 124(1)
; ASM64PWR4-NEXT: bl .test_vararg[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f1, align 4
  call void (i32, ...) @test_vararg(i32 42, float %0)
  ret void
}
; Common globals of each basic type, loaded as argument values by the
; stack-argument tests below.
@c = common global i8 0, align 1
@si = common global i16 0, align 2
@i = common global i32 0, align 4
@lli = common global i64 0, align 8
@f = common global float 0.000000e+00, align 4
@d = common global double 0.000000e+00, align 8
; Basic saving of integral type arguments to the parameter save area.
; Args 1-8 fill r3-r10; args 9-13 (i8, i16, i32, i64, i32) are stored to the
; parameter save area (stw/std to ascending offsets above the outgoing area).
define void @call_test_stackarg_int() {
; ASM32PWR4-LABEL: call_test_stackarg_int:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -80(1)
; ASM32PWR4-NEXT: lwz 3, L..C4(2) # @si
; ASM32PWR4-NEXT: stw 0, 88(1)
; ASM32PWR4-NEXT: lwz 4, L..C5(2) # @i
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: li 9, 7
; ASM32PWR4-NEXT: li 10, 8
; ASM32PWR4-NEXT: lha 7, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C6(2) # @c
; ASM32PWR4-NEXT: lbz 11, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C7(2) # @lli
; ASM32PWR4-NEXT: lwz 5, 0(4)
; ASM32PWR4-NEXT: lwz 4, 0(3)
; ASM32PWR4-NEXT: lwz 3, 4(3)
; ASM32PWR4-NEXT: stw 5, 76(1)
; ASM32PWR4-NEXT: stw 3, 72(1)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: stw 4, 68(1)
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: stw 5, 64(1)
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: stw 7, 60(1)
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: stw 11, 56(1)
; ASM32PWR4-NEXT: bl .test_stackarg_int[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 80
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_stackarg_int:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -160(1)
; ASM64PWR4-NEXT: ld 3, L..C3(2) # @si
; ASM64PWR4-NEXT: std 0, 176(1)
; ASM64PWR4-NEXT: ld 4, L..C4(2) # @i
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: li 9, 7
; ASM64PWR4-NEXT: li 10, 8
; ASM64PWR4-NEXT: lha 7, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C5(2) # @c
; ASM64PWR4-NEXT: lbz 11, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C6(2) # @lli
; ASM64PWR4-NEXT: lwz 5, 0(4)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: ld 3, 0(3)
; ASM64PWR4-NEXT: std 5, 144(1)
; ASM64PWR4-NEXT: std 3, 136(1)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: std 5, 128(1)
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: std 7, 120(1)
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: std 11, 112(1)
; ASM64PWR4-NEXT: bl .test_stackarg_int[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 160
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load i8, ptr @c, align 1
  %1 = load i16, ptr @si, align 2
  %2 = load i32, ptr @i, align 4
  %3 = load i64, ptr @lli, align 8
  %4 = load i32, ptr @i, align 4
  call void @test_stackarg_int(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i8 zeroext %0, i16 signext %1, i32 %2, i64 %3, i32 %4)
  ret void
}
; External callee taking five stack-passed integer arguments after the GPRs.
declare void @test_stackarg_int(i32, i32, i32, i32, i32, i32, i32, i32, i8 zeroext, i16 signext, i32, i64, i32)
; Basic saving of floating point type arguments to the parameter save area.
; The float and double arguments are passed in FPRs as well as in the parameter save area.
; Eight ints fill r3-r10; the float and double go in f1/f2 and are also
; stored to the parameter save area (stfs/stfd), since their GPR shadow
; slots are past the last register.
define void @call_test_stackarg_float() {
; ASM32PWR4-LABEL: call_test_stackarg_float:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -80(1)
; ASM32PWR4-NEXT: lwz 3, L..C8(2) # @f
; ASM32PWR4-NEXT: stw 0, 88(1)
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: lfs 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C9(2) # @d
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: li 9, 7
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: li 10, 8
; ASM32PWR4-NEXT: stfd 2, 60(1)
; ASM32PWR4-NEXT: stfs 1, 56(1)
; ASM32PWR4-NEXT: bl .test_stackarg_float[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 80
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_stackarg_float:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C7(2) # @f
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: lfs 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C8(2) # @d
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: li 9, 7
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: li 10, 8
; ASM64PWR4-NEXT: stfd 2, 120(1)
; ASM64PWR4-NEXT: stfs 1, 112(1)
; ASM64PWR4-NEXT: bl .test_stackarg_float[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f, align 4
  %1 = load double, ptr @d, align 8
  call void @test_stackarg_float(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, float %0, double %1)
  ret void
}
; External callee with FP arguments beyond the GPR-covered slots.
declare void @test_stackarg_float(i32, i32, i32, i32, i32, i32, i32, i32, float, double)
; Variadic double after six fixed ints: the double's value is also reloaded
; into the shadowing GPRs (r9/r10 pair on 32-bit, r9 on 64-bit).
define void @call_test_stackarg_float2() {
; ASM32PWR4-LABEL: call_test_stackarg_float2:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: lwz 3, L..C9(2) # @d
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: lfd 1, 0(3)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: stfd 1, 56(1)
; ASM32PWR4-NEXT: lwz 9, 56(1)
; ASM32PWR4-NEXT: lwz 10, 60(1)
; ASM32PWR4-NEXT: bl .test_stackarg_float2[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_stackarg_float2:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C8(2) # @d
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: lfd 1, 0(3)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: stfd 1, 120(1)
; ASM64PWR4-NEXT: ld 9, 120(1)
; ASM64PWR4-NEXT: bl .test_stackarg_float2[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load double, ptr @d, align 8
  call void (i32, i32, i32, i32, i32, i32, ...) @test_stackarg_float2(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, double %0)
  ret void
}
; External variadic callee for the six-int-then-double case.
declare void @test_stackarg_float2(i32, i32, i32, i32, i32, i32, ...)
; A double arg will be passed on the stack in PPC32 if there is only one available GPR.
; Caller side of a variadic call where seven fixed ints consume r3-r9, leaving
; one GPR before the double: on 32-bit the double's first word goes in r10 and
; the rest spills to the parameter area (stfd 1, 52(1)); the trailing float is
; also placed on the stack. On 64-bit the double's shadow fits entirely in r10.
define void @call_test_stackarg_float3() {
; ASM32PWR4-LABEL: call_test_stackarg_float3:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -80(1)
; ASM32PWR4-NEXT: lwz 3, L..C9(2) # @d
; ASM32PWR4-NEXT: stw 0, 88(1)
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: lfd 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C8(2) # @f
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: li 9, 7
; ASM32PWR4-NEXT: stfd 1, 72(1)
; ASM32PWR4-NEXT: lwz 10, 72(1)
; ASM32PWR4-NEXT: lfs 2, 0(3)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: stfs 2, 60(1)
; ASM32PWR4-NEXT: stfd 1, 52(1)
; ASM32PWR4-NEXT: bl .test_stackarg_float3[PR]
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 80
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_stackarg_float3:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C8(2) # @d
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: lfd 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C7(2) # @f
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: li 9, 7
; ASM64PWR4-NEXT: stfd 1, 120(1)
; ASM64PWR4-NEXT: ld 10, 120(1)
; ASM64PWR4-NEXT: lfs 2, 0(3)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: stfs 2, 112(1)
; ASM64PWR4-NEXT: bl .test_stackarg_float3[PR]
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load double, ptr @d, align 8
  %1 = load float, ptr @f, align 4
  call void (i32, i32, i32, i32, i32, i32, i32, ...) @test_stackarg_float3(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, double %0, float %1)
  ret void
}
declare void @test_stackarg_float3(i32, i32, i32, i32, i32, i32, i32, ...)
; Callee side with 16 integer args: the first 8 arrive in r3-r10, the rest are
; loaded from the caller's parameter save area (offsets from 56(1) on 32-bit,
; 112(1) on 64-bit). The checks also cover the load widths/extensions chosen
; per argument type (lwz/lwa/lha/lbz) and the r31 spill/reload around its use.
define i64 @test_ints_stack(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i64 %ll9, i16 signext %s10, i8 zeroext %c11, i32 %ui12, i32 %si13, i64 %ll14, i8 zeroext %uc15, i32 %i16) {
; ASM32PWR4-LABEL: test_ints_stack:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: add 3, 3, 4
; ASM32PWR4-NEXT: lwz 11, 92(1)
; ASM32PWR4-NEXT: add 3, 3, 5
; ASM32PWR4-NEXT: add 3, 3, 6
; ASM32PWR4-NEXT: add 3, 3, 7
; ASM32PWR4-NEXT: lwz 12, 76(1)
; ASM32PWR4-NEXT: add 3, 3, 8
; ASM32PWR4-NEXT: add 3, 3, 9
; ASM32PWR4-NEXT: lwz 6, 60(1)
; ASM32PWR4-NEXT: add 3, 3, 10
; ASM32PWR4-NEXT: srawi 5, 11, 31
; ASM32PWR4-NEXT: srawi 8, 3, 31
; ASM32PWR4-NEXT: lwz 4, 64(1)
; ASM32PWR4-NEXT: lwz 7, 56(1)
; ASM32PWR4-NEXT: stw 31, -4(1) # 4-byte Folded Spill
; ASM32PWR4-NEXT: srawi 31, 12, 31
; ASM32PWR4-NEXT: addc 3, 3, 6
; ASM32PWR4-NEXT: adde 7, 8, 7
; ASM32PWR4-NEXT: lwz 6, 68(1)
; ASM32PWR4-NEXT: srawi 8, 4, 31
; ASM32PWR4-NEXT: addc 3, 3, 4
; ASM32PWR4-NEXT: adde 7, 7, 8
; ASM32PWR4-NEXT: lwz 4, 72(1)
; ASM32PWR4-NEXT: addc 3, 3, 6
; ASM32PWR4-NEXT: addze 6, 7
; ASM32PWR4-NEXT: addc 3, 3, 4
; ASM32PWR4-NEXT: lwz 0, 84(1)
; ASM32PWR4-NEXT: addze 4, 6
; ASM32PWR4-NEXT: addc 3, 3, 12
; ASM32PWR4-NEXT: lwz 7, 80(1)
; ASM32PWR4-NEXT: adde 4, 4, 31
; ASM32PWR4-NEXT: addc 3, 3, 0
; ASM32PWR4-NEXT: lwz 6, 88(1)
; ASM32PWR4-NEXT: adde 4, 4, 7
; ASM32PWR4-NEXT: addc 3, 3, 6
; ASM32PWR4-NEXT: lwz 31, -4(1) # 4-byte Folded Reload
; ASM32PWR4-NEXT: addze 6, 4
; ASM32PWR4-NEXT: addc 4, 3, 11
; ASM32PWR4-NEXT: adde 3, 6, 5
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_ints_stack:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: ld 4, 112(1)
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: add 3, 3, 6
; ASM64PWR4-NEXT: add 3, 3, 7
; ASM64PWR4-NEXT: lwa 12, 124(1)
; ASM64PWR4-NEXT: add 3, 3, 8
; ASM64PWR4-NEXT: add 3, 3, 9
; ASM64PWR4-NEXT: add 3, 3, 10
; ASM64PWR4-NEXT: extsw 3, 3
; ASM64PWR4-NEXT: lwz 5, 132(1)
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: add 3, 3, 12
; ASM64PWR4-NEXT: std 31, -8(1) # 8-byte Folded Spill
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: lwz 31, 140(1)
; ASM64PWR4-NEXT: lwa 11, 148(1)
; ASM64PWR4-NEXT: add 3, 3, 31
; ASM64PWR4-NEXT: add 3, 3, 11
; ASM64PWR4-NEXT: ld 4, 152(1)
; ASM64PWR4-NEXT: lwz 0, 164(1)
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: lwa 5, 172(1)
; ASM64PWR4-NEXT: add 3, 3, 0
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: ld 31, -8(1) # 8-byte Folded Reload
; ASM64PWR4-NEXT: blr
entry:
  %add = add nsw i32 %i1, %i2
  %add1 = add nsw i32 %add, %i3
  %add2 = add nsw i32 %add1, %i4
  %add3 = add nsw i32 %add2, %i5
  %add4 = add nsw i32 %add3, %i6
  %add5 = add nsw i32 %add4, %i7
  %add6 = add nsw i32 %add5, %i8
  %conv = sext i32 %add6 to i64
  %add7 = add nsw i64 %conv, %ll9
  %conv8 = sext i16 %s10 to i64
  %add9 = add nsw i64 %add7, %conv8
  %conv10 = zext i8 %c11 to i64
  %add11 = add nsw i64 %add9, %conv10
  %conv12 = zext i32 %ui12 to i64
  %add13 = add nsw i64 %add11, %conv12
  %conv14 = sext i32 %si13 to i64
  %add15 = add nsw i64 %add13, %conv14
  %add16 = add nsw i64 %add15, %ll14
  %conv17 = zext i8 %uc15 to i64
  %add18 = add nsw i64 %add16, %conv17
  %conv19 = sext i32 %i16 to i64
  %add20 = add nsw i64 %add18, %conv19
  ret i64 %add20
}
@ll1 = common global i64 0, align 8
@si1 = common global i16 0, align 2
@ch = common global i8 0, align 1
@ui = common global i32 0, align 4
@sint = common global i32 0, align 4
@ll2 = common global i64 0, align 8
@uc1 = common global i8 0, align 1
@i1 = common global i32 0, align 4
; Caller side of test_ints_stack: args 1-8 are materialized in r3-r10 and args
; 9-16 (loaded from the globals above) are stored into the outgoing parameter
; save area starting at 56(1) on 32-bit / 112(1) on 64-bit before the call.
define void @caller_ints_stack() {
; ASM32PWR4-LABEL: caller_ints_stack:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -96(1)
; ASM32PWR4-NEXT: lwz 3, L..C10(2) # @si1
; ASM32PWR4-NEXT: stw 0, 104(1)
; ASM32PWR4-NEXT: lwz 4, L..C11(2) # @ch
; ASM32PWR4-NEXT: lwz 6, L..C12(2) # @sint
; ASM32PWR4-NEXT: lwz 8, L..C13(2) # @ll2
; ASM32PWR4-NEXT: lwz 10, L..C14(2) # @uc1
; ASM32PWR4-NEXT: lwz 12, L..C15(2) # @i1
; ASM32PWR4-NEXT: lha 5, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C16(2) # @ll1
; ASM32PWR4-NEXT: lwz 11, 0(3)
; ASM32PWR4-NEXT: lwz 7, 4(3)
; ASM32PWR4-NEXT: lwz 3, L..C17(2) # @ui
; ASM32PWR4-NEXT: lbz 4, 0(4)
; ASM32PWR4-NEXT: lwz 3, 0(3)
; ASM32PWR4-NEXT: lwz 6, 0(6)
; ASM32PWR4-NEXT: lwz 9, 0(8)
; ASM32PWR4-NEXT: lwz 8, 4(8)
; ASM32PWR4-NEXT: lbz 10, 0(10)
; ASM32PWR4-NEXT: lwz 12, 0(12)
; ASM32PWR4-NEXT: stw 10, 88(1)
; ASM32PWR4-NEXT: li 10, 8
; ASM32PWR4-NEXT: stw 8, 84(1)
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: stw 9, 80(1)
; ASM32PWR4-NEXT: li 9, 7
; ASM32PWR4-NEXT: stw 6, 76(1)
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: stw 3, 72(1)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: stw 4, 68(1)
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: stw 5, 64(1)
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: stw 7, 60(1)
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: stw 12, 92(1)
; ASM32PWR4-NEXT: stw 11, 56(1)
; ASM32PWR4-NEXT: bl .test_ints_stack
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 96
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: caller_ints_stack:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -176(1)
; ASM64PWR4-NEXT: ld 3, L..C9(2) # @si1
; ASM64PWR4-NEXT: std 0, 192(1)
; ASM64PWR4-NEXT: ld 4, L..C10(2) # @ch
; ASM64PWR4-NEXT: ld 6, L..C11(2) # @ll2
; ASM64PWR4-NEXT: ld 8, L..C12(2) # @uc1
; ASM64PWR4-NEXT: ld 9, L..C13(2) # @i1
; ASM64PWR4-NEXT: li 10, 8
; ASM64PWR4-NEXT: lha 7, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C14(2) # @ll1
; ASM64PWR4-NEXT: ld 11, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C15(2) # @ui
; ASM64PWR4-NEXT: lbz 5, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C16(2) # @sint
; ASM64PWR4-NEXT: lwz 3, 0(3)
; ASM64PWR4-NEXT: lwz 4, 0(4)
; ASM64PWR4-NEXT: ld 6, 0(6)
; ASM64PWR4-NEXT: lbz 8, 0(8)
; ASM64PWR4-NEXT: lwz 9, 0(9)
; ASM64PWR4-NEXT: std 9, 168(1)
; ASM64PWR4-NEXT: li 9, 7
; ASM64PWR4-NEXT: std 8, 160(1)
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: std 6, 152(1)
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: std 4, 144(1)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: std 3, 136(1)
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: std 5, 128(1)
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: std 7, 120(1)
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: std 11, 112(1)
; ASM64PWR4-NEXT: bl .test_ints_stack
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 176
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load i64, ptr @ll1, align 8
  %1 = load i16, ptr @si1, align 2
  %2 = load i8, ptr @ch, align 1
  %3 = load i32, ptr @ui, align 4
  %4 = load i32, ptr @sint, align 4
  %5 = load i64, ptr @ll2, align 8
  %6 = load i8, ptr @uc1, align 1
  %7 = load i32, ptr @i1, align 4
  %call = call i64 @test_ints_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i64 %0, i16 signext %1, i8 zeroext %2, i32 %3, i32 %4, i64 %5, i8 zeroext %6, i32 %7)
  ret void
}
@globali1 = global i8 0, align 1
; Callee side of an i1 passed on the stack (9th arg): it is read as the low
; byte of its register-sized slot — lbz from offset 59 (slot 56+3) on 32-bit
; and offset 119 (slot 112+7) on 64-bit — then stored to @globali1.
define void @test_i1_stack(i32 %a, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h, i32 %i, i1 zeroext %b) {
; ASM32PWR4-LABEL: test_i1_stack:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: lbz 3, 59(1)
; ASM32PWR4-NEXT: lwz 4, L..C18(2) # @globali1
; ASM32PWR4-NEXT: stb 3, 0(4)
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_i1_stack:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: lbz 3, 119(1)
; ASM64PWR4-NEXT: ld 4, L..C17(2) # @globali1
; ASM64PWR4-NEXT: stb 3, 0(4)
; ASM64PWR4-NEXT: blr
entry:
  %frombool = zext i1 %b to i8
  store i8 %frombool, ptr @globali1, align 1
  ret void
}
; Caller side of the i1 stack arg: the value 1 is stored as a full
; register-sized word/doubleword into the first stack parameter slot
; (stw 11, 56(1) on 32-bit, std 11, 112(1) on 64-bit).
define void @call_test_i1_stack() {
; ASM32PWR4-LABEL: call_test_i1_stack:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -64(1)
; ASM32PWR4-NEXT: li 11, 1
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: stw 0, 72(1)
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: stw 11, 56(1)
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: li 9, 7
; ASM32PWR4-NEXT: li 10, 8
; ASM32PWR4-NEXT: bl .test_i1_stack
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 64
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: call_test_i1_stack:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: li 11, 1
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: std 11, 112(1)
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: li 9, 7
; ASM64PWR4-NEXT: li 10, 8
; ASM64PWR4-NEXT: bl .test_i1_stack
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  call void @test_i1_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i1 true)
  ret void
}
; Callee side with 16 FP args: the first 13 arrive in FPR1-FPR13; the
; remaining float/double/float are loaded from the stack (lfs/lfd/lfs at
; 128/132/140(1) on 32-bit and 152/160/168(1) on 64-bit). Note the IR
; deliberately adds %d13 twice (add11/add12) and the checks match that.
define double @test_fpr_stack(double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, double %d9, double %s10, double %l11, double %d12, double %d13, float %f14, double %d15, float %f16) {
; ASM32PWR4-LABEL: test_fpr_stack:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: fadd 0, 1, 2
; ASM32PWR4-NEXT: lfs 1, 128(1)
; ASM32PWR4-NEXT: fadd 0, 0, 3
; ASM32PWR4-NEXT: lfd 2, 132(1)
; ASM32PWR4-NEXT: fadd 0, 0, 4
; ASM32PWR4-NEXT: fadd 0, 0, 5
; ASM32PWR4-NEXT: fadd 0, 0, 6
; ASM32PWR4-NEXT: fadd 0, 0, 7
; ASM32PWR4-NEXT: fadd 0, 0, 8
; ASM32PWR4-NEXT: fadd 0, 0, 9
; ASM32PWR4-NEXT: fadd 0, 0, 10
; ASM32PWR4-NEXT: fadd 0, 0, 11
; ASM32PWR4-NEXT: fadd 0, 0, 12
; ASM32PWR4-NEXT: fadd 0, 0, 13
; ASM32PWR4-NEXT: fadd 0, 0, 13
; ASM32PWR4-NEXT: fadd 0, 0, 1
; ASM32PWR4-NEXT: lfs 1, 140(1)
; ASM32PWR4-NEXT: fadd 0, 0, 2
; ASM32PWR4-NEXT: fadd 1, 0, 1
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: test_fpr_stack:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: fadd 0, 1, 2
; ASM64PWR4-NEXT: lfs 1, 152(1)
; ASM64PWR4-NEXT: fadd 0, 0, 3
; ASM64PWR4-NEXT: lfd 2, 160(1)
; ASM64PWR4-NEXT: fadd 0, 0, 4
; ASM64PWR4-NEXT: fadd 0, 0, 5
; ASM64PWR4-NEXT: fadd 0, 0, 6
; ASM64PWR4-NEXT: fadd 0, 0, 7
; ASM64PWR4-NEXT: fadd 0, 0, 8
; ASM64PWR4-NEXT: fadd 0, 0, 9
; ASM64PWR4-NEXT: fadd 0, 0, 10
; ASM64PWR4-NEXT: fadd 0, 0, 11
; ASM64PWR4-NEXT: fadd 0, 0, 12
; ASM64PWR4-NEXT: fadd 0, 0, 13
; ASM64PWR4-NEXT: fadd 0, 0, 13
; ASM64PWR4-NEXT: fadd 0, 0, 1
; ASM64PWR4-NEXT: lfs 1, 168(1)
; ASM64PWR4-NEXT: fadd 0, 0, 2
; ASM64PWR4-NEXT: fadd 1, 0, 1
; ASM64PWR4-NEXT: blr
entry:
  %add = fadd double %d1, %d2
  %add1 = fadd double %add, %d3
  %add2 = fadd double %add1, %d4
  %add3 = fadd double %add2, %d5
  %add4 = fadd double %add3, %d6
  %add5 = fadd double %add4, %d7
  %add6 = fadd double %add5, %d8
  %add7 = fadd double %add6, %d9
  %add8 = fadd double %add7, %s10
  %add9 = fadd double %add8, %l11
  %add10 = fadd double %add9, %d12
  %add11 = fadd double %add10, %d13
  %add12 = fadd double %add11, %d13
  %conv = fpext float %f14 to double
  %add13 = fadd double %add12, %conv
  %add14 = fadd double %add13, %d15
  %conv15 = fpext float %f16 to double
  %add16 = fadd double %add14, %conv15
  ret double %add16
}
@f14 = common global float 0.000000e+00, align 4
@d15 = common global double 0.000000e+00, align 8
@f16 = common global float 0.000000e+00, align 4
; Caller side of test_fpr_stack: FP constants for FPR1-FPR13 come from the
; constant pool (and some fmr copies), while the stack-passed args are built
; up in GPRs — on 32-bit as immediate word pairs, on 64-bit via
; rldic/rldimi/oris bit assembly — and stored to the outgoing parameter area.
define void @caller_fpr_stack() {
; ASM32PWR4-LABEL: caller_fpr_stack:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -144(1)
; ASM32PWR4-NEXT: lwz 3, L..C19(2) # @d15
; ASM32PWR4-NEXT: lwz 4, L..C20(2) # @f14
; ASM32PWR4-NEXT: lwz 5, L..C21(2) # @f16
; ASM32PWR4-NEXT: stw 0, 152(1)
; ASM32PWR4-NEXT: lis 6, 16361
; ASM32PWR4-NEXT: ori 6, 6, 39321
; ASM32PWR4-NEXT: lfd 0, 0(3)
; ASM32PWR4-NEXT: lwz 3, 0(4)
; ASM32PWR4-NEXT: lwz 4, 0(5)
; ASM32PWR4-NEXT: li 5, 0
; ASM32PWR4-NEXT: stw 5, 60(1)
; ASM32PWR4-NEXT: lis 5, 16352
; ASM32PWR4-NEXT: stw 5, 56(1)
; ASM32PWR4-NEXT: lis 5, 13107
; ASM32PWR4-NEXT: ori 5, 5, 13107
; ASM32PWR4-NEXT: stw 5, 68(1)
; ASM32PWR4-NEXT: lis 5, 16355
; ASM32PWR4-NEXT: ori 5, 5, 13107
; ASM32PWR4-NEXT: stw 5, 64(1)
; ASM32PWR4-NEXT: lis 5, 26214
; ASM32PWR4-NEXT: ori 5, 5, 26214
; ASM32PWR4-NEXT: stw 5, 76(1)
; ASM32PWR4-NEXT: lis 5, 16358
; ASM32PWR4-NEXT: ori 5, 5, 26214
; ASM32PWR4-NEXT: stw 5, 72(1)
; ASM32PWR4-NEXT: lis 5, -26215
; ASM32PWR4-NEXT: ori 5, 5, 39322
; ASM32PWR4-NEXT: stw 5, 84(1)
; ASM32PWR4-NEXT: stw 5, 100(1)
; ASM32PWR4-NEXT: lis 5, 16313
; ASM32PWR4-NEXT: ori 5, 5, 39321
; ASM32PWR4-NEXT: stw 5, 96(1)
; ASM32PWR4-NEXT: lis 5, -15729
; ASM32PWR4-NEXT: ori 5, 5, 23593
; ASM32PWR4-NEXT: stw 5, 108(1)
; ASM32PWR4-NEXT: lis 5, 16316
; ASM32PWR4-NEXT: ori 5, 5, 10485
; ASM32PWR4-NEXT: stw 5, 104(1)
; ASM32PWR4-NEXT: lis 5, -5243
; ASM32PWR4-NEXT: ori 5, 5, 7864
; ASM32PWR4-NEXT: stw 5, 116(1)
; ASM32PWR4-NEXT: lis 5, 16318
; ASM32PWR4-NEXT: ori 5, 5, 47185
; ASM32PWR4-NEXT: stw 6, 80(1)
; ASM32PWR4-NEXT: lis 6, -13108
; ASM32PWR4-NEXT: ori 6, 6, 52429
; ASM32PWR4-NEXT: stw 5, 112(1)
; ASM32PWR4-NEXT: lis 5, 2621
; ASM32PWR4-NEXT: ori 5, 5, 28836
; ASM32PWR4-NEXT: stw 6, 92(1)
; ASM32PWR4-NEXT: lis 6, 16364
; ASM32PWR4-NEXT: ori 6, 6, 52428
; ASM32PWR4-NEXT: stw 5, 124(1)
; ASM32PWR4-NEXT: lis 5, 16320
; ASM32PWR4-NEXT: ori 5, 5, 41943
; ASM32PWR4-NEXT: stw 6, 88(1)
; ASM32PWR4-NEXT: lwz 6, L..C22(2) # %const.0
; ASM32PWR4-NEXT: stw 5, 120(1)
; ASM32PWR4-NEXT: lwz 5, L..C23(2) # %const.1
; ASM32PWR4-NEXT: lfd 2, 0(6)
; ASM32PWR4-NEXT: lwz 6, L..C24(2) # %const.2
; ASM32PWR4-NEXT: lfd 3, 0(5)
; ASM32PWR4-NEXT: lwz 5, L..C25(2) # %const.3
; ASM32PWR4-NEXT: lfd 4, 0(6)
; ASM32PWR4-NEXT: lwz 6, L..C26(2) # %const.4
; ASM32PWR4-NEXT: lfd 6, 0(5)
; ASM32PWR4-NEXT: lwz 5, L..C27(2) # %const.5
; ASM32PWR4-NEXT: lfd 7, 0(6)
; ASM32PWR4-NEXT: lwz 6, L..C28(2) # %const.6
; ASM32PWR4-NEXT: lfd 8, 0(5)
; ASM32PWR4-NEXT: lwz 5, L..C29(2) # %const.7
; ASM32PWR4-NEXT: lfd 9, 0(6)
; ASM32PWR4-NEXT: lwz 6, L..C30(2) # %const.8
; ASM32PWR4-NEXT: lfd 1, 0(5)
; ASM32PWR4-NEXT: lwz 5, L..C31(2) # %const.9
; ASM32PWR4-NEXT: lfd 11, 0(6)
; ASM32PWR4-NEXT: lwz 6, L..C32(2) # %const.10
; ASM32PWR4-NEXT: fmr 10, 1
; ASM32PWR4-NEXT: lfd 12, 0(5)
; ASM32PWR4-NEXT: lwz 5, L..C33(2) # %const.11
; ASM32PWR4-NEXT: lfd 13, 0(6)
; ASM32PWR4-NEXT: lfs 5, 0(5)
; ASM32PWR4-NEXT: stfd 0, 132(1)
; ASM32PWR4-NEXT: stw 4, 140(1)
; ASM32PWR4-NEXT: stw 3, 128(1)
; ASM32PWR4-NEXT: bl .test_fpr_stack
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 144
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: caller_fpr_stack:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -176(1)
; ASM64PWR4-NEXT: ld 3, L..C18(2) # @f14
; ASM64PWR4-NEXT: std 0, 192(1)
; ASM64PWR4-NEXT: ld 4, L..C19(2) # @d15
; ASM64PWR4-NEXT: ld 5, L..C20(2) # @f16
; ASM64PWR4-NEXT: ld 6, L..C21(2) # %const.9
; ASM64PWR4-NEXT: lis 7, 16313
; ASM64PWR4-NEXT: lwz 3, 0(3)
; ASM64PWR4-NEXT: ld 4, 0(4)
; ASM64PWR4-NEXT: lwz 5, 0(5)
; ASM64PWR4-NEXT: stw 3, 152(1)
; ASM64PWR4-NEXT: ld 3, L..C22(2) # %const.0
; ASM64PWR4-NEXT: std 4, 160(1)
; ASM64PWR4-NEXT: ld 4, L..C23(2) # %const.1
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C24(2) # %const.2
; ASM64PWR4-NEXT: lfd 3, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C25(2) # %const.3
; ASM64PWR4-NEXT: lfd 4, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C26(2) # %const.4
; ASM64PWR4-NEXT: lfd 6, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C27(2) # %const.5
; ASM64PWR4-NEXT: lfd 7, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C28(2) # %const.6
; ASM64PWR4-NEXT: lfd 8, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C29(2) # %const.7
; ASM64PWR4-NEXT: lfd 9, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C30(2) # %const.8
; ASM64PWR4-NEXT: lfd 1, 0(4)
; ASM64PWR4-NEXT: lis 4, 16320
; ASM64PWR4-NEXT: ori 4, 4, 41943
; ASM64PWR4-NEXT: rldic 4, 4, 32, 2
; ASM64PWR4-NEXT: lfd 11, 0(3)
; ASM64PWR4-NEXT: lis 3, 16316
; ASM64PWR4-NEXT: fmr 10, 1
; ASM64PWR4-NEXT: ori 3, 3, 10485
; ASM64PWR4-NEXT: oris 4, 4, 2621
; ASM64PWR4-NEXT: stw 5, 168(1)
; ASM64PWR4-NEXT: lis 5, 16318
; ASM64PWR4-NEXT: rldic 3, 3, 32, 2
; ASM64PWR4-NEXT: ori 5, 5, 47185
; ASM64PWR4-NEXT: ori 4, 4, 28836
; ASM64PWR4-NEXT: lfd 12, 0(6)
; ASM64PWR4-NEXT: ld 6, L..C31(2) # %const.10
; ASM64PWR4-NEXT: oris 3, 3, 49807
; ASM64PWR4-NEXT: ori 3, 3, 23593
; ASM64PWR4-NEXT: std 4, 144(1)
; ASM64PWR4-NEXT: rldic 4, 5, 32, 2
; ASM64PWR4-NEXT: oris 4, 4, 60293
; ASM64PWR4-NEXT: ori 4, 4, 7864
; ASM64PWR4-NEXT: std 3, 128(1)
; ASM64PWR4-NEXT: ld 3, L..C32(2) # %const.11
; ASM64PWR4-NEXT: ori 5, 7, 39321
; ASM64PWR4-NEXT: rldic 5, 5, 32, 2
; ASM64PWR4-NEXT: std 4, 136(1)
; ASM64PWR4-NEXT: lis 4, 4091
; ASM64PWR4-NEXT: ori 4, 4, 13107
; ASM64PWR4-NEXT: rldic 4, 4, 34, 2
; ASM64PWR4-NEXT: lfs 5, 0(3)
; ASM64PWR4-NEXT: oris 3, 5, 39321
; ASM64PWR4-NEXT: ori 3, 3, 39322
; ASM64PWR4-NEXT: lfd 13, 0(6)
; ASM64PWR4-NEXT: std 3, 120(1)
; ASM64PWR4-NEXT: oris 3, 4, 52428
; ASM64PWR4-NEXT: ori 3, 3, 52429
; ASM64PWR4-NEXT: std 3, 112(1)
; ASM64PWR4-NEXT: bl .test_fpr_stack
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 176
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %0 = load float, ptr @f14, align 4
  %1 = load double, ptr @d15, align 8
  %2 = load float, ptr @f16, align 4
  %call = call double @test_fpr_stack(double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 0x3FE6666666666666, double 8.000000e-01, double 9.000000e-01, double 1.000000e-01, double 1.100000e-01, double 1.200000e-01, double 1.300000e-01, float %0, double %1, float %2)
  ret void
}
; Callee side mixing FPR and GPR args: on 32-bit the FPR shadowing pushes the
; integer args fully onto the stack (loads from 56-80(1)), and the int->double
; conversion uses the magic-constant stw/lfd/fsub sequence; on 64-bit some
; ints still arrive in r7-r10 and the conversion uses fcfid.
define i32 @mix_callee(double %d1, double %d2, double %d3, double %d4, i8 zeroext %c1, i16 signext %s1, i64 %ll1, i32 %i1, i32 %i2, i32 %i3) {
; ASM32PWR4-LABEL: mix_callee:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: lwz 4, 60(1)
; ASM32PWR4-NEXT: lis 8, 17200
; ASM32PWR4-NEXT: fadd 1, 1, 2
; ASM32PWR4-NEXT: fadd 1, 1, 3
; ASM32PWR4-NEXT: lwz 5, 56(1)
; ASM32PWR4-NEXT: lwz 3, 68(1)
; ASM32PWR4-NEXT: add 4, 5, 4
; ASM32PWR4-NEXT: lwz 5, L..C34(2) # %const.0
; ASM32PWR4-NEXT: fadd 1, 1, 4
; ASM32PWR4-NEXT: lwz 6, 72(1)
; ASM32PWR4-NEXT: add 3, 4, 3
; ASM32PWR4-NEXT: lwz 7, 76(1)
; ASM32PWR4-NEXT: add 3, 3, 6
; ASM32PWR4-NEXT: stw 8, -16(1)
; ASM32PWR4-NEXT: add 3, 3, 7
; ASM32PWR4-NEXT: lwz 8, 80(1)
; ASM32PWR4-NEXT: add 3, 3, 8
; ASM32PWR4-NEXT: lfs 0, 0(5)
; ASM32PWR4-NEXT: xoris 3, 3, 32768
; ASM32PWR4-NEXT: stw 3, -12(1)
; ASM32PWR4-NEXT: addi 3, 1, -4
; ASM32PWR4-NEXT: lfd 2, -16(1)
; ASM32PWR4-NEXT: fsub 0, 2, 0
; ASM32PWR4-NEXT: fadd 0, 0, 1
; ASM32PWR4-NEXT: fctiwz 0, 0
; ASM32PWR4-NEXT: stfiwx 0, 0, 3
; ASM32PWR4-NEXT: lwz 3, -4(1)
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: mix_callee:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: lwz 3, 116(1)
; ASM64PWR4-NEXT: add 4, 7, 8
; ASM64PWR4-NEXT: fadd 0, 1, 2
; ASM64PWR4-NEXT: add 4, 4, 9
; ASM64PWR4-NEXT: fadd 0, 0, 3
; ASM64PWR4-NEXT: add 4, 4, 10
; ASM64PWR4-NEXT: lwz 5, 124(1)
; ASM64PWR4-NEXT: add 3, 4, 3
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: fadd 0, 0, 4
; ASM64PWR4-NEXT: extsw 3, 3
; ASM64PWR4-NEXT: std 3, -16(1)
; ASM64PWR4-NEXT: addi 3, 1, -4
; ASM64PWR4-NEXT: lfd 1, -16(1)
; ASM64PWR4-NEXT: fcfid 1, 1
; ASM64PWR4-NEXT: fadd 0, 1, 0
; ASM64PWR4-NEXT: fctiwz 0, 0
; ASM64PWR4-NEXT: stfiwx 0, 0, 3
; ASM64PWR4-NEXT: lwz 3, -4(1)
; ASM64PWR4-NEXT: blr
entry:
  %add = fadd double %d1, %d2
  %add1 = fadd double %add, %d3
  %add2 = fadd double %add1, %d4
  %conv = zext i8 %c1 to i32
  %conv3 = sext i16 %s1 to i32
  %add4 = add nsw i32 %conv, %conv3
  %conv5 = sext i32 %add4 to i64
  %add6 = add nsw i64 %conv5, %ll1
  %conv7 = sext i32 %i1 to i64
  %add8 = add nsw i64 %add6, %conv7
  %conv9 = sext i32 %i2 to i64
  %add10 = add nsw i64 %add8, %conv9
  %conv11 = sext i32 %i3 to i64
  %add12 = add nsw i64 %add10, %conv11
  %conv13 = trunc i64 %add12 to i32
  %conv14 = sitofp i32 %conv13 to double
  %add15 = fadd double %conv14, %add2
  %conv16 = fptosi double %add15 to i32
  ret i32 %conv16
}
; Caller side of mix_callee: the four doubles go in FPR1-FPR4 from the
; constant pool; on 32-bit every integer arg (including the split i64
; 30000000) is stored to the parameter area, while on 64-bit only the last
; two ints (50, 60) spill to the stack.
define void @caller_mix() {
; ASM32PWR4-LABEL: caller_mix:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -96(1)
; ASM32PWR4-NEXT: li 3, 60
; ASM32PWR4-NEXT: stw 0, 104(1)
; ASM32PWR4-NEXT: stw 3, 80(1)
; ASM32PWR4-NEXT: li 3, 50
; ASM32PWR4-NEXT: stw 3, 76(1)
; ASM32PWR4-NEXT: li 3, 40
; ASM32PWR4-NEXT: stw 3, 72(1)
; ASM32PWR4-NEXT: li 3, 0
; ASM32PWR4-NEXT: stw 3, 64(1)
; ASM32PWR4-NEXT: li 3, 2
; ASM32PWR4-NEXT: stw 3, 60(1)
; ASM32PWR4-NEXT: lwz 3, L..C35(2) # %const.0
; ASM32PWR4-NEXT: lfd 1, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C36(2) # %const.1
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C37(2) # %const.2
; ASM32PWR4-NEXT: lfd 3, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C38(2) # %const.3
; ASM32PWR4-NEXT: lfd 4, 0(3)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: stw 3, 56(1)
; ASM32PWR4-NEXT: lis 3, 457
; ASM32PWR4-NEXT: ori 3, 3, 50048
; ASM32PWR4-NEXT: stw 3, 68(1)
; ASM32PWR4-NEXT: bl .mix_callee
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 96
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: caller_mix:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -128(1)
; ASM64PWR4-NEXT: ld 3, L..C33(2) # %const.0
; ASM64PWR4-NEXT: ld 4, L..C34(2) # %const.1
; ASM64PWR4-NEXT: lis 5, 457
; ASM64PWR4-NEXT: li 7, 1
; ASM64PWR4-NEXT: std 0, 144(1)
; ASM64PWR4-NEXT: ori 9, 5, 50048
; ASM64PWR4-NEXT: li 8, 2
; ASM64PWR4-NEXT: lfd 1, 0(3)
; ASM64PWR4-NEXT: ld 3, L..C35(2) # %const.2
; ASM64PWR4-NEXT: li 10, 40
; ASM64PWR4-NEXT: lfd 2, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C36(2) # %const.3
; ASM64PWR4-NEXT: lfd 3, 0(3)
; ASM64PWR4-NEXT: li 3, 60
; ASM64PWR4-NEXT: lfd 4, 0(4)
; ASM64PWR4-NEXT: li 4, 50
; ASM64PWR4-NEXT: std 3, 120(1)
; ASM64PWR4-NEXT: std 4, 112(1)
; ASM64PWR4-NEXT: bl .mix_callee
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: addi 1, 1, 128
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
  %call = call i32 @mix_callee(double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, i8 zeroext 1, i16 signext 2, i64 30000000, i32 40, i32 50, i32 60)
  ret void
}
; Callee side with 8 ints (r3-r10) plus 14 doubles: d1-d13 arrive in
; FPR1-FPR13 and d14 is loaded from the stack (lfd 1, 160(1) on 32-bit,
; 216(1) on 64-bit). Also covers sitofp lowering: magic-constant
; stw/lfd/fsub on 32-bit vs fcfid on 64-bit.
define i32 @mix_floats(i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, double %d1, double %d2, double %d3, double %d4, double %d5, double %d6, double %d7, double %d8, double %d9, double %d10, double %d11, double %d12, double %d13, double %d14) {
; ASM32PWR4-LABEL: mix_floats:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: add 3, 3, 4
; ASM32PWR4-NEXT: lwz 4, L..C39(2) # %const.0
; ASM32PWR4-NEXT: lis 11, 17200
; ASM32PWR4-NEXT: stfd 31, -8(1) # 8-byte Folded Spill
; ASM32PWR4-NEXT: add 3, 3, 5
; ASM32PWR4-NEXT: add 3, 3, 6
; ASM32PWR4-NEXT: add 3, 3, 7
; ASM32PWR4-NEXT: stw 11, -24(1)
; ASM32PWR4-NEXT: add 3, 3, 8
; ASM32PWR4-NEXT: add 3, 3, 9
; ASM32PWR4-NEXT: add 3, 3, 10
; ASM32PWR4-NEXT: lfs 0, 0(4)
; ASM32PWR4-NEXT: xoris 3, 3, 32768
; ASM32PWR4-NEXT: stw 3, -20(1)
; ASM32PWR4-NEXT: addi 3, 1, -12
; ASM32PWR4-NEXT: lfd 31, -24(1)
; ASM32PWR4-NEXT: fsub 0, 31, 0
; ASM32PWR4-NEXT: fadd 0, 0, 1
; ASM32PWR4-NEXT: lfd 1, 160(1)
; ASM32PWR4-NEXT: fadd 0, 0, 2
; ASM32PWR4-NEXT: fadd 0, 0, 3
; ASM32PWR4-NEXT: fadd 0, 0, 4
; ASM32PWR4-NEXT: fadd 0, 0, 5
; ASM32PWR4-NEXT: fadd 0, 0, 6
; ASM32PWR4-NEXT: fadd 0, 0, 7
; ASM32PWR4-NEXT: fadd 0, 0, 8
; ASM32PWR4-NEXT: fadd 0, 0, 9
; ASM32PWR4-NEXT: fadd 0, 0, 10
; ASM32PWR4-NEXT: fadd 0, 0, 11
; ASM32PWR4-NEXT: fadd 0, 0, 12
; ASM32PWR4-NEXT: fadd 0, 0, 13
; ASM32PWR4-NEXT: fadd 0, 0, 1
; ASM32PWR4-NEXT: fctiwz 0, 0
; ASM32PWR4-NEXT: stfiwx 0, 0, 3
; ASM32PWR4-NEXT: lwz 3, -12(1)
; ASM32PWR4-NEXT: lfd 31, -8(1) # 8-byte Folded Reload
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: mix_floats:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: add 3, 3, 4
; ASM64PWR4-NEXT: add 3, 3, 5
; ASM64PWR4-NEXT: add 3, 3, 6
; ASM64PWR4-NEXT: add 3, 3, 7
; ASM64PWR4-NEXT: add 3, 3, 8
; ASM64PWR4-NEXT: add 3, 3, 9
; ASM64PWR4-NEXT: add 3, 3, 10
; ASM64PWR4-NEXT: extsw 3, 3
; ASM64PWR4-NEXT: std 3, -16(1)
; ASM64PWR4-NEXT: addi 3, 1, -4
; ASM64PWR4-NEXT: lfd 0, -16(1)
; ASM64PWR4-NEXT: fcfid 0, 0
; ASM64PWR4-NEXT: fadd 0, 0, 1
; ASM64PWR4-NEXT: lfd 1, 216(1)
; ASM64PWR4-NEXT: fadd 0, 0, 2
; ASM64PWR4-NEXT: fadd 0, 0, 3
; ASM64PWR4-NEXT: fadd 0, 0, 4
; ASM64PWR4-NEXT: fadd 0, 0, 5
; ASM64PWR4-NEXT: fadd 0, 0, 6
; ASM64PWR4-NEXT: fadd 0, 0, 7
; ASM64PWR4-NEXT: fadd 0, 0, 8
; ASM64PWR4-NEXT: fadd 0, 0, 9
; ASM64PWR4-NEXT: fadd 0, 0, 10
; ASM64PWR4-NEXT: fadd 0, 0, 11
; ASM64PWR4-NEXT: fadd 0, 0, 12
; ASM64PWR4-NEXT: fadd 0, 0, 13
; ASM64PWR4-NEXT: fadd 0, 0, 1
; ASM64PWR4-NEXT: fctiwz 0, 0
; ASM64PWR4-NEXT: stfiwx 0, 0, 3
; ASM64PWR4-NEXT: lwz 3, -4(1)
; ASM64PWR4-NEXT: blr
entry:
  %add = add nsw i32 %i1, %i2
  %add1 = add nsw i32 %add, %i3
  %add2 = add nsw i32 %add1, %i4
  %add3 = add nsw i32 %add2, %i5
  %add4 = add nsw i32 %add3, %i6
  %add5 = add nsw i32 %add4, %i7
  %add6 = add nsw i32 %add5, %i8
  %conv = sitofp i32 %add6 to double
  %add7 = fadd double %conv, %d1
  %add8 = fadd double %add7, %d2
  %add9 = fadd double %add8, %d3
  %add10 = fadd double %add9, %d4
  %add11 = fadd double %add10, %d5
  %add12 = fadd double %add11, %d6
  %add13 = fadd double %add12, %d7
  %add14 = fadd double %add13, %d8
  %add15 = fadd double %add14, %d9
  %add16 = fadd double %add15, %d10
  %add17 = fadd double %add16, %d11
  %add18 = fadd double %add17, %d12
  %add19 = fadd double %add18, %d13
  %add20 = fadd double %add19, %d14
  %conv21 = fptosi double %add20 to i32
  ret i32 %conv21
}
define void @mix_floats_caller() {
; ASM32PWR4-LABEL: mix_floats_caller:
; ASM32PWR4: # %bb.0: # %entry
; ASM32PWR4-NEXT: mflr 0
; ASM32PWR4-NEXT: stwu 1, -176(1)
; ASM32PWR4-NEXT: li 3, 0
; ASM32PWR4-NEXT: stw 0, 184(1)
; ASM32PWR4-NEXT: lis 4, 16352
; ASM32PWR4-NEXT: lis 5, 16339
; ASM32PWR4-NEXT: lis 6, 16364
; ASM32PWR4-NEXT: stw 3, 92(1)
; ASM32PWR4-NEXT: ori 5, 5, 13107
; ASM32PWR4-NEXT: ori 6, 6, 52428
; ASM32PWR4-NEXT: stw 3, 132(1)
; ASM32PWR4-NEXT: lis 3, 16368
; ASM32PWR4-NEXT: li 8, 6
; ASM32PWR4-NEXT: li 9, 7
; ASM32PWR4-NEXT: li 10, 8
; ASM32PWR4-NEXT: stw 3, 128(1)
; ASM32PWR4-NEXT: lis 3, -26215
; ASM32PWR4-NEXT: ori 3, 3, 39322
; ASM32PWR4-NEXT: stw 4, 88(1)
; ASM32PWR4-NEXT: lis 4, 16313
; ASM32PWR4-NEXT: ori 4, 4, 39321
; ASM32PWR4-NEXT: stw 3, 60(1)
; ASM32PWR4-NEXT: stw 3, 68(1)
; ASM32PWR4-NEXT: stw 3, 84(1)
; ASM32PWR4-NEXT: stw 3, 116(1)
; ASM32PWR4-NEXT: stw 3, 140(1)
; ASM32PWR4-NEXT: lis 3, 16369
; ASM32PWR4-NEXT: ori 3, 3, 39321
; ASM32PWR4-NEXT: stw 4, 56(1)
; ASM32PWR4-NEXT: lis 4, 16329
; ASM32PWR4-NEXT: ori 4, 4, 39321
; ASM32PWR4-NEXT: stw 3, 136(1)
; ASM32PWR4-NEXT: lis 3, 16371
; ASM32PWR4-NEXT: ori 3, 3, 13107
; ASM32PWR4-NEXT: stw 4, 64(1)
; ASM32PWR4-NEXT: lis 4, 13107
; ASM32PWR4-NEXT: ori 4, 4, 13107
; ASM32PWR4-NEXT: stw 3, 144(1)
; ASM32PWR4-NEXT: lis 3, 16372
; ASM32PWR4-NEXT: ori 3, 3, 52428
; ASM32PWR4-NEXT: stw 4, 76(1)
; ASM32PWR4-NEXT: stw 4, 100(1)
; ASM32PWR4-NEXT: stw 4, 148(1)
; ASM32PWR4-NEXT: lwz 4, L..C40(2) # %const.0
; ASM32PWR4-NEXT: stw 3, 152(1)
; ASM32PWR4-NEXT: lwz 3, L..C41(2) # %const.1
; ASM32PWR4-NEXT: lfd 1, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C42(2) # %const.2
; ASM32PWR4-NEXT: lfd 2, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C43(2) # %const.3
; ASM32PWR4-NEXT: stw 5, 72(1)
; ASM32PWR4-NEXT: lis 5, 16345
; ASM32PWR4-NEXT: ori 5, 5, 39321
; ASM32PWR4-NEXT: stw 5, 80(1)
; ASM32PWR4-NEXT: lis 5, 16355
; ASM32PWR4-NEXT: ori 5, 5, 13107
; ASM32PWR4-NEXT: lfd 3, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C44(2) # %const.4
; ASM32PWR4-NEXT: lfd 4, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C45(2) # %const.5
; ASM32PWR4-NEXT: stw 5, 96(1)
; ASM32PWR4-NEXT: lis 5, 26214
; ASM32PWR4-NEXT: ori 7, 5, 26214
; ASM32PWR4-NEXT: lis 5, 16358
; ASM32PWR4-NEXT: lfd 6, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C46(2) # %const.6
; ASM32PWR4-NEXT: ori 5, 5, 26214
; ASM32PWR4-NEXT: lfd 7, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C47(2) # %const.7
; ASM32PWR4-NEXT: stw 5, 104(1)
; ASM32PWR4-NEXT: lis 5, 16361
; ASM32PWR4-NEXT: ori 5, 5, 39321
; ASM32PWR4-NEXT: lfd 8, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C48(2) # %const.8
; ASM32PWR4-NEXT: lfd 9, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C49(2) # %const.9
; ASM32PWR4-NEXT: stw 5, 112(1)
; ASM32PWR4-NEXT: lis 5, -13108
; ASM32PWR4-NEXT: ori 5, 5, 52429
; ASM32PWR4-NEXT: stw 5, 124(1)
; ASM32PWR4-NEXT: stw 5, 156(1)
; ASM32PWR4-NEXT: lwz 5, L..C50(2) # %const.12
; ASM32PWR4-NEXT: lfd 11, 0(4)
; ASM32PWR4-NEXT: lwz 4, L..C51(2) # %const.10
; ASM32PWR4-NEXT: lfd 12, 0(3)
; ASM32PWR4-NEXT: lwz 3, L..C52(2) # %const.11
; ASM32PWR4-NEXT: lfd 13, 0(4)
; ASM32PWR4-NEXT: lis 4, 16374
; ASM32PWR4-NEXT: ori 11, 4, 26214
; ASM32PWR4-NEXT: li 4, 2
; ASM32PWR4-NEXT: lfs 5, 0(3)
; ASM32PWR4-NEXT: li 3, 1
; ASM32PWR4-NEXT: lfs 10, 0(5)
; ASM32PWR4-NEXT: li 5, 3
; ASM32PWR4-NEXT: stw 7, 108(1)
; ASM32PWR4-NEXT: stw 6, 120(1)
; ASM32PWR4-NEXT: li 6, 4
; ASM32PWR4-NEXT: stw 7, 164(1)
; ASM32PWR4-NEXT: li 7, 5
; ASM32PWR4-NEXT: stw 11, 160(1)
; ASM32PWR4-NEXT: bl .mix_floats
; ASM32PWR4-NEXT: nop
; ASM32PWR4-NEXT: addi 1, 1, 176
; ASM32PWR4-NEXT: lwz 0, 8(1)
; ASM32PWR4-NEXT: mtlr 0
; ASM32PWR4-NEXT: blr
;
; ASM64PWR4-LABEL: mix_floats_caller:
; ASM64PWR4: # %bb.0: # %entry
; ASM64PWR4-NEXT: mflr 0
; ASM64PWR4-NEXT: stdu 1, -240(1)
; ASM64PWR4-NEXT: li 3, 1023
; ASM64PWR4-NEXT: std 0, 256(1)
; ASM64PWR4-NEXT: ld 4, L..C37(2) # %const.0
; ASM64PWR4-NEXT: ld 8, L..C38(2) # %const.6
; ASM64PWR4-NEXT: lis 5, 16371
; ASM64PWR4-NEXT: ld 6, L..C39(2) # %const.3
; ASM64PWR4-NEXT: ld 9, L..C40(2) # %const.9
; ASM64PWR4-NEXT: ld 10, L..C41(2) # %const.11
; ASM64PWR4-NEXT: rldic 3, 3, 52, 2
; ASM64PWR4-NEXT: lis 11, 4091
; ASM64PWR4-NEXT: std 3, 184(1)
; ASM64PWR4-NEXT: li 3, 511
; ASM64PWR4-NEXT: lis 12, 16361
; ASM64PWR4-NEXT: rldic 3, 3, 53, 2
; ASM64PWR4-NEXT: lfd 1, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C42(2) # %const.2
; ASM64PWR4-NEXT: lis 0, 16345
; ASM64PWR4-NEXT: std 3, 144(1)
; ASM64PWR4-NEXT: ld 3, L..C43(2) # %const.1
; ASM64PWR4-NEXT: lfd 2, 0(3)
; ASM64PWR4-NEXT: lis 3, 16374
; ASM64PWR4-NEXT: ori 7, 3, 26214
; ASM64PWR4-NEXT: ori 3, 5, 13107
; ASM64PWR4-NEXT: ld 5, L..C44(2) # %const.5
; ASM64PWR4-NEXT: lfd 8, 0(8)
; ASM64PWR4-NEXT: ld 8, L..C45(2) # %const.8
; ASM64PWR4-NEXT: rldimi 7, 7, 32, 0
; ASM64PWR4-NEXT: rlwimi 7, 7, 16, 0, 15
; ASM64PWR4-NEXT: rldimi 3, 3, 32, 0
; ASM64PWR4-NEXT: lfd 3, 0(4)
; ASM64PWR4-NEXT: ld 4, L..C46(2) # %const.4
; ASM64PWR4-NEXT: rlwimi 3, 3, 16, 0, 15
; ASM64PWR4-NEXT: lfd 4, 0(6)
; ASM64PWR4-NEXT: lis 6, 16355
; ASM64PWR4-NEXT: lfd 7, 0(5)
; ASM64PWR4-NEXT: ori 5, 6, 13107
; ASM64PWR4-NEXT: ld 6, L..C47(2) # %const.7
; ASM64PWR4-NEXT: rldimi 5, 5, 32, 0
; ASM64PWR4-NEXT: rlwimi 5, 5, 16, 0, 15
; ASM64PWR4-NEXT: lfd 11, 0(8)
; ASM64PWR4-NEXT: ld 8, L..C48(2) # %const.10
; ASM64PWR4-NEXT: lfd 6, 0(4)
; ASM64PWR4-NEXT: lis 4, 16358
; ASM64PWR4-NEXT: ori 4, 4, 26214
; ASM64PWR4-NEXT: rldimi 4, 4, 32, 0
; ASM64PWR4-NEXT: lfd 9, 0(6)
; ASM64PWR4-NEXT: lis 6, 16339
; ASM64PWR4-NEXT: rlwimi 4, 4, 16, 0, 15
; ASM64PWR4-NEXT: ori 6, 6, 13107
; ASM64PWR4-NEXT: lfd 12, 0(9)
; ASM64PWR4-NEXT: lis 9, 4093
; ASM64PWR4-NEXT: ori 9, 9, 13107
; ASM64PWR4-NEXT: lfd 13, 0(8)
; ASM64PWR4-NEXT: lis 8, 16369
; ASM64PWR4-NEXT: ori 8, 8, 39321
; ASM64PWR4-NEXT: rldimi 6, 6, 32, 0
; ASM64PWR4-NEXT: std 31, 232(1) # 8-byte Folded Spill
; ASM64PWR4-NEXT: ld 31, L..C49(2) # %const.12
; ASM64PWR4-NEXT: rldic 9, 9, 34, 2
; ASM64PWR4-NEXT: rlwimi 6, 6, 16, 0, 15
; ASM64PWR4-NEXT: oris 9, 9, 52428
; ASM64PWR4-NEXT: lfs 5, 0(10)
; ASM64PWR4-NEXT: lis 10, 16329
; ASM64PWR4-NEXT: ori 10, 10, 39321
; ASM64PWR4-NEXT: std 7, 216(1)
; ASM64PWR4-NEXT: ori 7, 11, 13107
; ASM64PWR4-NEXT: ori 11, 12, 39321
; ASM64PWR4-NEXT: ori 12, 0, 39321
; ASM64PWR4-NEXT: std 4, 160(1)
; ASM64PWR4-NEXT: rldic 4, 8, 32, 2
; ASM64PWR4-NEXT: rldic 7, 7, 34, 2
; ASM64PWR4-NEXT: oris 4, 4, 39321
; ASM64PWR4-NEXT: std 30, 224(1) # 8-byte Folded Spill
; ASM64PWR4-NEXT: lis 30, 16313
; ASM64PWR4-NEXT: rldic 8, 11, 32, 2
; ASM64PWR4-NEXT: rldic 11, 12, 32, 2
; ASM64PWR4-NEXT: std 3, 200(1)
; ASM64PWR4-NEXT: ori 3, 30, 39321
; ASM64PWR4-NEXT: ori 4, 4, 39322
; ASM64PWR4-NEXT: rldic 3, 3, 32, 2
; ASM64PWR4-NEXT: std 5, 152(1)
; ASM64PWR4-NEXT: rldic 5, 10, 32, 2
; ASM64PWR4-NEXT: oris 5, 5, 39321
; ASM64PWR4-NEXT: oris 3, 3, 39321
; ASM64PWR4-NEXT: std 6, 128(1)
; ASM64PWR4-NEXT: oris 6, 7, 52428
; ASM64PWR4-NEXT: ori 7, 9, 52429
; ASM64PWR4-NEXT: li 9, 7
; ASM64PWR4-NEXT: lfs 10, 0(31)
; ASM64PWR4-NEXT: li 10, 8
; ASM64PWR4-NEXT: std 7, 208(1)
; ASM64PWR4-NEXT: oris 7, 8, 39321
; ASM64PWR4-NEXT: oris 8, 11, 39321
; ASM64PWR4-NEXT: ori 11, 3, 39322
; ASM64PWR4-NEXT: li 3, 1
; ASM64PWR4-NEXT: std 4, 192(1)
; ASM64PWR4-NEXT: ori 4, 6, 52429
; ASM64PWR4-NEXT: ori 6, 8, 39322
; ASM64PWR4-NEXT: std 4, 176(1)
; ASM64PWR4-NEXT: ori 4, 7, 39322
; ASM64PWR4-NEXT: ori 7, 5, 39322
; ASM64PWR4-NEXT: li 5, 3
; ASM64PWR4-NEXT: li 8, 6
; ASM64PWR4-NEXT: std 4, 168(1)
; ASM64PWR4-NEXT: li 4, 2
; ASM64PWR4-NEXT: std 6, 136(1)
; ASM64PWR4-NEXT: li 6, 4
; ASM64PWR4-NEXT: std 7, 120(1)
; ASM64PWR4-NEXT: li 7, 5
; ASM64PWR4-NEXT: std 11, 112(1)
; ASM64PWR4-NEXT: bl .mix_floats
; ASM64PWR4-NEXT: nop
; ASM64PWR4-NEXT: ld 31, 232(1) # 8-byte Folded Reload
; ASM64PWR4-NEXT: ld 30, 224(1) # 8-byte Folded Reload
; ASM64PWR4-NEXT: addi 1, 1, 240
; ASM64PWR4-NEXT: ld 0, 16(1)
; ASM64PWR4-NEXT: mtlr 0
; ASM64PWR4-NEXT: blr
entry:
%call = call i32 @mix_floats(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, double 1.000000e-01, double 2.000000e-01, double 3.000000e-01, double 4.000000e-01, double 5.000000e-01, double 6.000000e-01, double 0x3FE6666666666666, double 8.000000e-01, double 9.000000e-01, double 1.000000e+00, double 1.100000e+00, double 1.200000e+00, double 1.300000e+00, double 1.400000e+00)
ret void
}