llvm/test/CodeGen/AArch64/urem-vector-lkk.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s

define <4 x i16> @fold_urem_vec_1(<4 x i16> %x) {
; CHECK-LABEL: fold_urem_vec_1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, .LCPI0_0
; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI0_0]
; CHECK-NEXT:    adrp x8, .LCPI0_1
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_1]
; CHECK-NEXT:    adrp x8, .LCPI0_2
; CHECK-NEXT:    ushl v1.4h, v0.4h, v1.4h
; CHECK-NEXT:    umull v1.4s, v1.4h, v2.4h
; CHECK-NEXT:    movi d2, #0000000000000000
; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
; CHECK-NEXT:    fneg d2, d2
; CHECK-NEXT:    sub v3.4h, v0.4h, v1.4h
; CHECK-NEXT:    umull v2.4s, v3.4h, v2.4h
; CHECK-NEXT:    shrn v2.4h, v2.4s, #16
; CHECK-NEXT:    add v1.4h, v2.4h, v1.4h
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_2]
; CHECK-NEXT:    adrp x8, .LCPI0_3
; CHECK-NEXT:    ushl v1.4h, v1.4h, v2.4h
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI0_3]
; CHECK-NEXT:    mls v0.4h, v1.4h, v2.4h
; CHECK-NEXT:    ret
  %1 = urem <4 x i16> %x, <i16 95, i16 124, i16 98, i16 1003>
  ret <4 x i16> %1
}
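; The expansion above appears to use per-lane magic multipliers, pre/post shifts
; and the divisors themselves, presumably held in .LCPI0_0-.LCPI0_3. The
; movi/fneg pair materialises 0x8000000000000000, i.e. <0, 0, 0, 0x8000> as a
; .4h vector, implementing the (x - q) >> 1 add-back fixup for the lane whose
; magic constant does not fit the plain multiply-high form.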

define <4 x i16> @fold_urem_vec_2(<4 x i16> %x) {
; CHECK-LABEL: fold_urem_vec_2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #44151 // =0xac77
; CHECK-NEXT:    movi v2.4h, #95
; CHECK-NEXT:    dup v1.4h, w8
; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
; CHECK-NEXT:    ushr v1.4s, v1.4s, #22
; CHECK-NEXT:    xtn v1.4h, v1.4s
; CHECK-NEXT:    mls v0.4h, v1.4h, v2.4h
; CHECK-NEXT:    ret
  %1 = urem <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95>
  ret <4 x i16> %1
}
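; 44151 = ceil(2^22 / 95), so (x * 44151) >> 22 == x / 95 for any 16-bit x, and
; mls then computes x - (x / 95) * 95. For example, x = 1000 gives
; (1000 * 44151) >> 22 = 10 and 1000 - 10 * 95 = 50 = 1000 % 95.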


; Don't fold if we can combine urem with udiv.
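; The quotient in v1 is computed once: mls uses it for the remainder, and the
; final add reuses it for the udiv half of the sum.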
define <4 x i16> @combine_urem_udiv(<4 x i16> %x) {
; CHECK-LABEL: combine_urem_udiv:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #44151 // =0xac77
; CHECK-NEXT:    movi v2.4h, #95
; CHECK-NEXT:    dup v1.4h, w8
; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
; CHECK-NEXT:    ushr v1.4s, v1.4s, #22
; CHECK-NEXT:    xtn v1.4h, v1.4s
; CHECK-NEXT:    mls v0.4h, v1.4h, v2.4h
; CHECK-NEXT:    add v0.4h, v0.4h, v1.4h
; CHECK-NEXT:    ret
  %1 = urem <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95>
  %2 = udiv <4 x i16> %x, <i16 95, i16 95, i16 95, i16 95>
  %3 = add <4 x i16> %1, %2
  ret <4 x i16> %3
}


; Don't fold for divisors that are a power of two.
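; With mixed divisors (the last lane is 95) the whole vector still goes through
; the multiply-high sequence; a splatted power-of-two divisor would instead be
; expected to lower to a simple AND with (divisor - 1).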
define <4 x i16> @dont_fold_urem_power_of_two(<4 x i16> %x) {
; CHECK-LABEL: dont_fold_urem_power_of_two:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, .LCPI3_0
; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI3_0]
; CHECK-NEXT:    adrp x8, .LCPI3_1
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI3_1]
; CHECK-NEXT:    adrp x8, .LCPI3_2
; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
; CHECK-NEXT:    ushl v1.4h, v1.4h, v2.4h
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI3_2]
; CHECK-NEXT:    mls v0.4h, v1.4h, v2.4h
; CHECK-NEXT:    ret
  %1 = urem <4 x i16> %x, <i16 64, i16 32, i16 8, i16 95>
  ret <4 x i16> %1
}

; Don't fold if the divisor is one.
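; Lane 0 has divisor 1, so its remainder is always 0. The and/orr blend keeps
; x itself as the "quotient" for that lane, and the final mls (presumably
; against <1, 654, 23, 5423> in .LCPI4_3) then yields x - x * 1 = 0 there.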
define <4 x i16> @dont_fold_urem_one(<4 x i16> %x) {
; CHECK-LABEL: dont_fold_urem_one:
; CHECK:       // %bb.0:
; CHECK-NEXT:    adrp x8, .LCPI4_0
; CHECK-NEXT:    movi d4, #0x0000000000ffff
; CHECK-NEXT:    ldr d1, [x8, :lo12:.LCPI4_0]
; CHECK-NEXT:    adrp x8, .LCPI4_1
; CHECK-NEXT:    ldr d3, [x8, :lo12:.LCPI4_1]
; CHECK-NEXT:    adrp x8, .LCPI4_2
; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
; CHECK-NEXT:    shrn v1.4h, v1.4s, #16
; CHECK-NEXT:    sub v2.4h, v0.4h, v1.4h
; CHECK-NEXT:    umull v2.4s, v2.4h, v3.4h
; CHECK-NEXT:    movi d3, #0xffffffffffff0000
; CHECK-NEXT:    shrn v2.4h, v2.4s, #16
; CHECK-NEXT:    add v1.4h, v2.4h, v1.4h
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI4_2]
; CHECK-NEXT:    adrp x8, .LCPI4_3
; CHECK-NEXT:    ushl v1.4h, v1.4h, v2.4h
; CHECK-NEXT:    and v2.8b, v0.8b, v4.8b
; CHECK-NEXT:    and v1.8b, v1.8b, v3.8b
; CHECK-NEXT:    orr v1.8b, v2.8b, v1.8b
; CHECK-NEXT:    ldr d2, [x8, :lo12:.LCPI4_3]
; CHECK-NEXT:    mls v0.4h, v1.4h, v2.4h
; CHECK-NEXT:    ret
  %1 = urem <4 x i16> %x, <i16 1, i16 654, i16 23, i16 5423>
  ret <4 x i16> %1
}

; Don't fold if the divisor is 2^16.
define <4 x i16> @dont_fold_urem_i16_smax(<4 x i16> %x) {
; CHECK-LABEL: dont_fold_urem_i16_smax:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %1 = urem <4 x i16> %x, <i16 1, i16 65536, i16 23, i16 5423>
  ret <4 x i16> %1
}

; Don't fold i64 urem.
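; NEON has no 64x64-bit multiply-high, so the elements are moved to GPRs and
; handled with umulh/msub chains; the lane dividing by 1 is simply left at zero
; by the movi, which is why only three chains appear.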
define <4 x i64> @dont_fold_urem_i64(<4 x i64> %x) {
; CHECK-LABEL: dont_fold_urem_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov x8, #17097 // =0x42c9
; CHECK-NEXT:    fmov x9, d1
; CHECK-NEXT:    mov x10, v1.d[1]
; CHECK-NEXT:    movk x8, #45590, lsl #16
; CHECK-NEXT:    mov x11, v0.d[1]
; CHECK-NEXT:    mov x12, #12109 // =0x2f4d
; CHECK-NEXT:    movk x8, #34192, lsl #32
; CHECK-NEXT:    movk x12, #52170, lsl #16
; CHECK-NEXT:    movi v0.2d, #0000000000000000
; CHECK-NEXT:    movk x8, #25644, lsl #48
; CHECK-NEXT:    movk x12, #28749, lsl #32
; CHECK-NEXT:    umulh x8, x9, x8
; CHECK-NEXT:    movk x12, #49499, lsl #48
; CHECK-NEXT:    lsr x13, x11, #1
; CHECK-NEXT:    umulh x12, x10, x12
; CHECK-NEXT:    sub x14, x9, x8
; CHECK-NEXT:    add x8, x8, x14, lsr #1
; CHECK-NEXT:    mov x14, #21445 // =0x53c5
; CHECK-NEXT:    movk x14, #1603, lsl #16
; CHECK-NEXT:    movk x14, #15432, lsl #32
; CHECK-NEXT:    lsr x8, x8, #4
; CHECK-NEXT:    movk x14, #25653, lsl #48
; CHECK-NEXT:    umulh x13, x13, x14
; CHECK-NEXT:    mov w14, #23 // =0x17
; CHECK-NEXT:    msub x8, x8, x14, x9
; CHECK-NEXT:    lsr x9, x12, #12
; CHECK-NEXT:    mov w12, #5423 // =0x152f
; CHECK-NEXT:    msub x9, x9, x12, x10
; CHECK-NEXT:    mov w12, #654 // =0x28e
; CHECK-NEXT:    lsr x10, x13, #7
; CHECK-NEXT:    msub x10, x10, x12, x11
; CHECK-NEXT:    fmov d1, x8
; CHECK-NEXT:    mov v1.d[1], x9
; CHECK-NEXT:    mov v0.d[1], x10
; CHECK-NEXT:    ret
  %1 = urem <4 x i64> %x, <i64 1, i64 654, i64 23, i64 5423>
  ret <4 x i64> %1
}

define <16 x i8> @fold_urem_v16i8(<16 x i8> %x) {
; CHECK-LABEL: fold_urem_v16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.16b, #205
; CHECK-NEXT:    umull2 v2.8h, v0.16b, v1.16b
; CHECK-NEXT:    umull v1.8h, v0.8b, v1.8b
; CHECK-NEXT:    uzp2 v1.16b, v1.16b, v2.16b
; CHECK-NEXT:    movi v2.16b, #10
; CHECK-NEXT:    ushr v1.16b, v1.16b, #3
; CHECK-NEXT:    mls v0.16b, v1.16b, v2.16b
; CHECK-NEXT:    ret
  %1 = urem <16 x i8> %x, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
  ret <16 x i8> %1
}
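; 205 = ceil(2^11 / 10); uzp2 keeps the high byte of each 16-bit product (>> 8)
; and the ushr adds 3 more, so the quotient is (x * 205) >> 11 == x / 10 for any
; 8-bit x, e.g. (123 * 205) >> 11 = 12 and 123 - 12 * 10 = 3 = 123 % 10.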

define <8 x i8> @fold_urem_v8i8(<8 x i8> %x) {
; CHECK-LABEL: fold_urem_v8i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movi v1.8b, #205
; CHECK-NEXT:    movi v2.8b, #10
; CHECK-NEXT:    umull v1.8h, v0.8b, v1.8b
; CHECK-NEXT:    shrn v1.8b, v1.8h, #8
; CHECK-NEXT:    ushr v1.8b, v1.8b, #3
; CHECK-NEXT:    mls v0.8b, v1.8b, v2.8b
; CHECK-NEXT:    ret
  %1 = urem <8 x i8> %x, <i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10, i8 10>
  ret <8 x i8> %1
}

define <8 x i16> @fold_urem_v8i16(<8 x i16> %x) {
; CHECK-LABEL: fold_urem_v8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #52429 // =0xcccd
; CHECK-NEXT:    dup v1.8h, w8
; CHECK-NEXT:    umull2 v2.4s, v0.8h, v1.8h
; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
; CHECK-NEXT:    uzp2 v1.8h, v1.8h, v2.8h
; CHECK-NEXT:    movi v2.8h, #10
; CHECK-NEXT:    ushr v1.8h, v1.8h, #3
; CHECK-NEXT:    mls v0.8h, v1.8h, v2.8h
; CHECK-NEXT:    ret
  %1 = urem <8 x i16> %x, <i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10, i16 10>
  ret <8 x i16> %1
}
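; 52429 = ceil(2^19 / 10); uzp2 keeps the high halfword of each 32-bit product
; (>> 16) and the ushr adds 3 more, giving (x * 52429) >> 19 == x / 10 for any
; 16-bit x, e.g. (12345 * 52429) >> 19 = 1234 and 12345 - 1234 * 10 = 5.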

define <4 x i16> @fold_urem_v4i16(<4 x i16> %x) {
; CHECK-LABEL: fold_urem_v4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #52429 // =0xcccd
; CHECK-NEXT:    movi v2.4h, #10
; CHECK-NEXT:    dup v1.4h, w8
; CHECK-NEXT:    umull v1.4s, v0.4h, v1.4h
; CHECK-NEXT:    ushr v1.4s, v1.4s, #19
; CHECK-NEXT:    xtn v1.4h, v1.4s
; CHECK-NEXT:    mls v0.4h, v1.4h, v2.4h
; CHECK-NEXT:    ret
  %1 = urem <4 x i16> %x, <i16 10, i16 10, i16 10, i16 10>
  ret <4 x i16> %1
}

define <4 x i32> @fold_urem_v4i32(<4 x i32> %x) {
; CHECK-LABEL: fold_urem_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #52429 // =0xcccd
; CHECK-NEXT:    movk w8, #52428, lsl #16
; CHECK-NEXT:    dup v1.4s, w8
; CHECK-NEXT:    umull2 v2.2d, v0.4s, v1.4s
; CHECK-NEXT:    umull v1.2d, v0.2s, v1.2s
; CHECK-NEXT:    uzp2 v1.4s, v1.4s, v2.4s
; CHECK-NEXT:    movi v2.4s, #10
; CHECK-NEXT:    ushr v1.4s, v1.4s, #3
; CHECK-NEXT:    mls v0.4s, v1.4s, v2.4s
; CHECK-NEXT:    ret
  %1 = urem <4 x i32> %x, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %1
}
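; Same pattern at 32 bits: 0xCCCCCCCD = ceil(2^35 / 10), and uzp2 (>> 32) plus
; ushr #3 give (x * 0xCCCCCCCD) >> 35 == x / 10 for any 32-bit x.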

define <2 x i32> @fold_urem_v2i32(<2 x i32> %x) {
; CHECK-LABEL: fold_urem_v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #52429 // =0xcccd
; CHECK-NEXT:    movi v2.2s, #10
; CHECK-NEXT:    movk w8, #52428, lsl #16
; CHECK-NEXT:    dup v1.2s, w8
; CHECK-NEXT:    umull v1.2d, v0.2s, v1.2s
; CHECK-NEXT:    ushr v1.2d, v1.2d, #35
; CHECK-NEXT:    xtn v1.2s, v1.2d
; CHECK-NEXT:    mls v0.2s, v1.2s, v2.2s
; CHECK-NEXT:    ret
  %1 = urem <2 x i32> %x, <i32 10, i32 10>
  ret <2 x i32> %1
}

define <2 x i64> @fold_urem_v2i64(<2 x i64> %x) {
; CHECK-LABEL: fold_urem_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmov x10, d0
; CHECK-NEXT:    mov x8, #-3689348814741910324 // =0xcccccccccccccccc
; CHECK-NEXT:    mov x9, v0.d[1]
; CHECK-NEXT:    movk x8, #52429
; CHECK-NEXT:    mov w12, #10 // =0xa
; CHECK-NEXT:    umulh x11, x10, x8
; CHECK-NEXT:    umulh x8, x9, x8
; CHECK-NEXT:    lsr x11, x11, #3
; CHECK-NEXT:    msub x10, x11, x12, x10
; CHECK-NEXT:    lsr x8, x8, #3
; CHECK-NEXT:    msub x8, x8, x12, x9
; CHECK-NEXT:    fmov d0, x10
; CHECK-NEXT:    mov v0.d[1], x8
; CHECK-NEXT:    ret
  %1 = urem <2 x i64> %x, <i64 10, i64 10>
  ret <2 x i64> %1
}
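; There is no vector 64-bit multiply-high, so each lane is moved to a GPR:
; 0xCCCCCCCCCCCCCCCD = ceil(2^67 / 10), umulh supplies the >> 64 part and the
; lsr #3 the rest, then msub computes x - (x / 10) * 10.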

define <1 x i64> @fold_urem_v1i64(<1 x i64> %x) {
; CHECK-LABEL: fold_urem_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov x9, d0
; CHECK-NEXT:    mov x8, #-3689348814741910324 // =0xcccccccccccccccc
; CHECK-NEXT:    mov w10, #10 // =0xa
; CHECK-NEXT:    movk x8, #52429
; CHECK-NEXT:    umulh x8, x9, x8
; CHECK-NEXT:    lsr x8, x8, #3
; CHECK-NEXT:    msub x8, x8, x10, x9
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    ret
  %1 = urem <1 x i64> %x, <i64 10>
  ret <1 x i64> %1
}