llvm/test/CodeGen/AArch64/machine-combiner-subadd.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s

; The test cases in this file check the following transformation, which is
; applied only when the right-hand form can reduce latency:
;     A - (B + C)  ==>   (A - B) - C
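;
; In test1 and test2 below, %add depends on %xor, which in turn depends on
; %a1, so computing %c1 - (%xor + %a1) serializes three data-dependent
; operations. Rewriting it as (%c1 - %a1) - %xor lets the first subtraction
; execute in parallel with the eor that produces %xor, shortening the
; critical path by one instruction (see the two sub instructions in the
; CHECK lines).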

; 32-bit version.
define i32 @test1(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: test1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add w8, w0, #100
; CHECK-NEXT:    orr w9, w2, #0x80
; CHECK-NEXT:    eor w10, w1, w8, lsl #8
; CHECK-NEXT:    sub w8, w9, w8
; CHECK-NEXT:    sub w8, w8, w10
; CHECK-NEXT:    eor w0, w8, w10, asr #13
; CHECK-NEXT:    ret
entry:
  %c1  = or  i32 %c, 128
  %a1  = add i32 %a, 100
  %shl = shl i32 %a1, 8
  %xor = xor i32 %shl, %b
  %add = add i32 %xor, %a1
  %sub = sub i32 %c1, %add
  %shr = ashr i32 %xor, 13
  %xor2 = xor i32 %sub, %shr
  ret i32 %xor2
}

; 64-bit version.
define i64 @test2(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: test2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add x8, x0, #100
; CHECK-NEXT:    orr x9, x2, #0x80
; CHECK-NEXT:    eor x10, x1, x8, lsl #8
; CHECK-NEXT:    sub x8, x9, x8
; CHECK-NEXT:    sub x8, x8, x10
; CHECK-NEXT:    eor x0, x8, x10, asr #13
; CHECK-NEXT:    ret
entry:
  %c1  = or  i64 %c, 128
  %a1  = add i64 %a, 100
  %shl = shl i64 %a1, 8
  %xor = xor i64 %shl, %b
  %add = add i64 %xor, %a1
  %sub = sub i64 %c1, %add
  %shr = ashr i64 %xor, 13
  %xor2 = xor i64 %sub, %shr
  ret i64 %xor2
}

; Negative test: the right-hand form cannot reduce latency here.
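;
; Here %xor is the minuend rather than part of the subtrahend, so rewriting
; %xor - (%c1 + %a1) as (%xor - %c1) - %a1 would chain both subtractions
; behind the eor that produces %xor. As generated, the add of %c1 and %a1
; already executes in parallel with that eor, so the combiner leaves the
; code unchanged.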
define i32 @test3(i32 %a, i32 %b, i32 %c) {
; CHECK-LABEL: test3:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    add w8, w0, #100
; CHECK-NEXT:    orr w9, w2, #0x80
; CHECK-NEXT:    eor w10, w1, w8, lsl #8
; CHECK-NEXT:    add w8, w9, w8
; CHECK-NEXT:    sub w8, w10, w8
; CHECK-NEXT:    eor w0, w8, w10, asr #13
; CHECK-NEXT:    ret
entry:
  %c1  = or  i32 %c, 128
  %a1  = add i32 %a, 100
  %shl = shl i32 %a1, 8
  %xor = xor i32 %shl, %b
  %add = add i32 %c1, %a1
  %sub = sub i32 %xor, %add
  %shr = ashr i32 %xor, 13
  %xor2 = xor i32 %sub, %shr
  ret i32 %xor2
}