llvm/test/CodeGen/AArch64/sve-llrint.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 -mattr=+sve | FileCheck %s
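;
; Each llrint intrinsic below is expanded to a saturating convert-to-i64
; sequence: frintx rounds each lane using the current FPCR rounding mode,
; fcmge/fcmgt compare the lanes against the conversion bounds so that
; out-of-range values clamp to INT64_MIN/INT64_MAX, fcvtzs performs the
; conversion, and a final fcmuo sends NaN lanes to zero.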

define <vscale x 1 x i64> @llrint_v1i64_v1f16(<vscale x 1 x half> %x) {
; CHECK-LABEL: llrint_v1i64_v1f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov w8, #64511 // =0xfbff
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    mov z1.h, w8
; CHECK-NEXT:    mov w8, #31743 // =0x7bff
; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
; CHECK-NEXT:    mov z3.h, w8
; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    movprfx z1, z0
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
; CHECK-NEXT:    mov z1.d, p1/m, z2.d
; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half> %x)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f16(<vscale x 1 x half>)
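
; The half-precision bounds are the finite f16 range: 0xfbff is -65504.0 and
; 0x7bff is 65504.0. Every finite half fits in an i64, so only -inf reaches
; the INT64_MIN clamp, only +inf reaches the INT64_MAX clamp, and NaN lanes
; are zeroed by the fcmuo select.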

define <vscale x 2 x i64> @llrint_v2i64_v2f16(<vscale x 2 x half> %x) {
; CHECK-LABEL: llrint_v2i64_v2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov w8, #64511 // =0xfbff
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    mov z1.h, w8
; CHECK-NEXT:    mov w8, #31743 // =0x7bff
; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
; CHECK-NEXT:    mov z3.h, w8
; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z1.h
; CHECK-NEXT:    movprfx z1, z0
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.h
; CHECK-NEXT:    fcmgt p2.h, p0/z, z0.h, z3.h
; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
; CHECK-NEXT:    mov z1.d, p1/m, z2.d
; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half> %x)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f16(<vscale x 2 x half>)
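
; From nxv4i64 results upwards the conversion no longer fits in a single
; register, so the source is split with uunpklo/uunpkhi and each fragment is
; converted independently; the extra compare predicates this creates are what
; force the p4-p10 (and z8/z9) spills in the larger cases.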

define <vscale x 4 x i64> @llrint_v4i64_v4f16(<vscale x 4 x half> %x) {
; CHECK-LABEL: llrint_v4i64_v4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    uunpklo z1.d, z0.s
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    mov w8, #64511 // =0xfbff
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z2.h, w8
; CHECK-NEXT:    mov w8, #31743 // =0x7bff
; CHECK-NEXT:    mov z3.h, w8
; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
; CHECK-NEXT:    fcmge p1.h, p0/z, z1.h, z2.h
; CHECK-NEXT:    fcmge p2.h, p0/z, z0.h, z2.h
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    movprfx z4, z1
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.h
; CHECK-NEXT:    movprfx z5, z0
; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.h
; CHECK-NEXT:    fcmgt p3.h, p0/z, z1.h, z3.h
; CHECK-NEXT:    fcmgt p4.h, p0/z, z0.h, z3.h
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
; CHECK-NEXT:    fcmuo p1.h, p0/z, z1.h, z1.h
; CHECK-NEXT:    fcmuo p0.h, p0/z, z0.h, z0.h
; CHECK-NEXT:    sel z2.d, p2, z2.d, z5.d
; CHECK-NEXT:    sel z0.d, p3, z6.d, z3.d
; CHECK-NEXT:    sel z1.d, p4, z6.d, z2.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    mov z1.d, p0/m, #0 // =0x0
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half> %x)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f16(<vscale x 4 x half>)

define <vscale x 8 x i64> @llrint_v8i64_v8f16(<vscale x 8 x half> %x) {
; CHECK-LABEL: llrint_v8i64_v8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    uunpklo z1.s, z0.h
; CHECK-NEXT:    uunpkhi z0.s, z0.h
; CHECK-NEXT:    mov w8, #64511 // =0xfbff
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z4.h, w8
; CHECK-NEXT:    mov w8, #31743 // =0x7bff
; CHECK-NEXT:    mov z6.h, w8
; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
; CHECK-NEXT:    uunpklo z2.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    uunpklo z3.d, z0.s
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
; CHECK-NEXT:    frintx z1.h, p0/m, z1.h
; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
; CHECK-NEXT:    movprfx z5, z0
; CHECK-NEXT:    frintx z5.h, p0/m, z0.h
; CHECK-NEXT:    mov z0.d, #0x8000000000000000
; CHECK-NEXT:    fcmge p1.h, p0/z, z2.h, z4.h
; CHECK-NEXT:    fcmge p2.h, p0/z, z1.h, z4.h
; CHECK-NEXT:    fcmge p3.h, p0/z, z3.h, z4.h
; CHECK-NEXT:    fcmge p4.h, p0/z, z5.h, z4.h
; CHECK-NEXT:    movprfx z4, z2
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z2.h
; CHECK-NEXT:    movprfx z7, z1
; CHECK-NEXT:    fcvtzs z7.d, p0/m, z1.h
; CHECK-NEXT:    movprfx z24, z3
; CHECK-NEXT:    fcvtzs z24.d, p0/m, z3.h
; CHECK-NEXT:    movprfx z25, z5
; CHECK-NEXT:    fcvtzs z25.d, p0/m, z5.h
; CHECK-NEXT:    fcmgt p7.h, p0/z, z3.h, z6.h
; CHECK-NEXT:    fcmgt p5.h, p0/z, z2.h, z6.h
; CHECK-NEXT:    fcmgt p6.h, p0/z, z1.h, z6.h
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    mov z4.d, p1/m, z0.d
; CHECK-NEXT:    fcmgt p1.h, p0/z, z5.h, z6.h
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    sel z6.d, p2, z0.d, z7.d
; CHECK-NEXT:    fcmuo p2.h, p0/z, z2.h, z2.h
; CHECK-NEXT:    sel z7.d, p3, z0.d, z24.d
; CHECK-NEXT:    fcmuo p3.h, p0/z, z1.h, z1.h
; CHECK-NEXT:    sel z24.d, p4, z0.d, z25.d
; CHECK-NEXT:    fcmuo p4.h, p0/z, z3.h, z3.h
; CHECK-NEXT:    fcmuo p0.h, p0/z, z5.h, z5.h
; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z3.d, p1, z26.d, z24.d
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z3.d, p0/m, #0 // =0x0
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half> %x)
  ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f16(<vscale x 8 x half>)

define <vscale x 16 x i64> @llrint_v16i64_v16f16(<vscale x 16 x half> %x) {
; CHECK-LABEL: llrint_v16i64_v16f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-3
; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z9, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z8, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    uunpklo z2.s, z0.h
; CHECK-NEXT:    uunpkhi z0.s, z0.h
; CHECK-NEXT:    mov w8, #64511 // =0xfbff
; CHECK-NEXT:    uunpklo z4.s, z1.h
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uunpkhi z1.s, z1.h
; CHECK-NEXT:    mov z5.h, w8
; CHECK-NEXT:    mov w8, #31743 // =0x7bff
; CHECK-NEXT:    mov z25.d, #0x8000000000000000
; CHECK-NEXT:    mov z27.h, w8
; CHECK-NEXT:    mov z7.d, #0x7fffffffffffffff
; CHECK-NEXT:    uunpklo z3.d, z2.s
; CHECK-NEXT:    uunpkhi z2.d, z2.s
; CHECK-NEXT:    uunpklo z6.d, z0.s
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    uunpklo z24.d, z4.s
; CHECK-NEXT:    uunpkhi z4.d, z4.s
; CHECK-NEXT:    uunpklo z26.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    frintx z2.h, p0/m, z2.h
; CHECK-NEXT:    frintx z3.h, p0/m, z3.h
; CHECK-NEXT:    frintx z6.h, p0/m, z6.h
; CHECK-NEXT:    movprfx z28, z0
; CHECK-NEXT:    frintx z28.h, p0/m, z0.h
; CHECK-NEXT:    movprfx z29, z4
; CHECK-NEXT:    frintx z29.h, p0/m, z4.h
; CHECK-NEXT:    frintx z24.h, p0/m, z24.h
; CHECK-NEXT:    movprfx z30, z1
; CHECK-NEXT:    frintx z30.h, p0/m, z1.h
; CHECK-NEXT:    frintx z26.h, p0/m, z26.h
; CHECK-NEXT:    fcmge p5.h, p0/z, z2.h, z5.h
; CHECK-NEXT:    fcmge p2.h, p0/z, z3.h, z5.h
; CHECK-NEXT:    movprfx z1, z2
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z2.h
; CHECK-NEXT:    movprfx z0, z3
; CHECK-NEXT:    fcvtzs z0.d, p0/m, z3.h
; CHECK-NEXT:    fcmge p6.h, p0/z, z6.h, z5.h
; CHECK-NEXT:    fcmgt p3.h, p0/z, z3.h, z27.h
; CHECK-NEXT:    fcmuo p1.h, p0/z, z3.h, z3.h
; CHECK-NEXT:    fcmge p7.h, p0/z, z28.h, z5.h
; CHECK-NEXT:    movprfx z3, z6
; CHECK-NEXT:    fcvtzs z3.d, p0/m, z6.h
; CHECK-NEXT:    fcmge p8.h, p0/z, z24.h, z5.h
; CHECK-NEXT:    fcmgt p4.h, p0/z, z2.h, z27.h
; CHECK-NEXT:    fcmge p9.h, p0/z, z26.h, z5.h
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    movprfx z4, z24
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z24.h
; CHECK-NEXT:    fcmge p10.h, p0/z, z30.h, z5.h
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    movprfx z31, z26
; CHECK-NEXT:    fcvtzs z31.d, p0/m, z26.h
; CHECK-NEXT:    movprfx z8, z30
; CHECK-NEXT:    fcvtzs z8.d, p0/m, z30.h
; CHECK-NEXT:    mov z1.d, p5/m, z25.d
; CHECK-NEXT:    fcmge p5.h, p0/z, z29.h, z5.h
; CHECK-NEXT:    not p6.b, p0/z, p6.b
; CHECK-NEXT:    mov z0.d, p2/m, z25.d
; CHECK-NEXT:    fcmuo p2.h, p0/z, z2.h, z2.h
; CHECK-NEXT:    movprfx z2, z28
; CHECK-NEXT:    fcvtzs z2.d, p0/m, z28.h
; CHECK-NEXT:    movprfx z5, z29
; CHECK-NEXT:    fcvtzs z5.d, p0/m, z29.h
; CHECK-NEXT:    not p7.b, p0/z, p7.b
; CHECK-NEXT:    mov z3.d, p6/m, z25.d
; CHECK-NEXT:    not p6.b, p0/z, p8.b
; CHECK-NEXT:    fcmgt p8.h, p0/z, z6.h, z27.h
; CHECK-NEXT:    mov z1.d, p4/m, z7.d
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    mov z0.d, p3/m, z7.d
; CHECK-NEXT:    fcmgt p3.h, p0/z, z29.h, z27.h
; CHECK-NEXT:    sel z9.d, p7, z25.d, z2.d
; CHECK-NEXT:    not p7.b, p0/z, p9.b
; CHECK-NEXT:    mov z4.d, p6/m, z25.d
; CHECK-NEXT:    not p6.b, p0/z, p10.b
; CHECK-NEXT:    fcmgt p10.h, p0/z, z28.h, z27.h
; CHECK-NEXT:    mov z5.d, p5/m, z25.d
; CHECK-NEXT:    fcmgt p5.h, p0/z, z24.h, z27.h
; CHECK-NEXT:    fcmuo p9.h, p0/z, z6.h, z6.h
; CHECK-NEXT:    sel z6.d, p7, z25.d, z31.d
; CHECK-NEXT:    sel z25.d, p6, z25.d, z8.d
; CHECK-NEXT:    ldr z8, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    fcmgt p6.h, p0/z, z26.h, z27.h
; CHECK-NEXT:    fcmgt p7.h, p0/z, z30.h, z27.h
; CHECK-NEXT:    fcmuo p4.h, p0/z, z28.h, z28.h
; CHECK-NEXT:    sel z2.d, p8, z7.d, z3.d
; CHECK-NEXT:    sel z3.d, p10, z7.d, z9.d
; CHECK-NEXT:    ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    fcmuo p8.h, p0/z, z29.h, z29.h
; CHECK-NEXT:    mov z4.d, p5/m, z7.d
; CHECK-NEXT:    fcmuo p5.h, p0/z, z24.h, z24.h
; CHECK-NEXT:    fcmuo p10.h, p0/z, z26.h, z26.h
; CHECK-NEXT:    mov z5.d, p3/m, z7.d
; CHECK-NEXT:    mov z6.d, p6/m, z7.d
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    fcmuo p0.h, p0/z, z30.h, z30.h
; CHECK-NEXT:    sel z7.d, p7, z7.d, z25.d
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z3.d, p4/m, #0 // =0x0
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #3
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half> %x)
  ret <vscale x 16 x i64> %a
}
declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f16(<vscale x 16 x half>)
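
; An nxv32i64 result exceeds the z0-z7 return registers, so it is returned
; indirectly: x8 carries the result address and the converted fragments are
; stored through it with st1b/st1d at rdvl-scaled offsets.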

define <vscale x 32 x i64> @llrint_v32i64_v32f16(<vscale x 32 x half> %x) {
; CHECK-LABEL: llrint_v32i64_v32f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-17
; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z15, [sp, #9, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z14, [sp, #10, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #12, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #13, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z10, [sp, #14, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 136 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
; CHECK-NEXT:    uunpkhi z5.s, z0.h
; CHECK-NEXT:    uunpklo z4.s, z0.h
; CHECK-NEXT:    mov w9, #64511 // =0xfbff
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uunpklo z6.s, z1.h
; CHECK-NEXT:    mov z30.h, w9
; CHECK-NEXT:    uunpkhi z10.s, z1.h
; CHECK-NEXT:    mov w9, #31743 // =0x7bff
; CHECK-NEXT:    mov z29.d, #0x8000000000000000
; CHECK-NEXT:    uunpklo z8.s, z2.h
; CHECK-NEXT:    uunpkhi z13.s, z3.h
; CHECK-NEXT:    uunpklo z18.s, z3.h
; CHECK-NEXT:    uunpklo z7.d, z5.s
; CHECK-NEXT:    uunpklo z0.d, z4.s
; CHECK-NEXT:    uunpkhi z4.d, z4.s
; CHECK-NEXT:    uunpkhi z24.d, z5.s
; CHECK-NEXT:    uunpklo z25.d, z6.s
; CHECK-NEXT:    uunpkhi z26.d, z6.s
; CHECK-NEXT:    uunpklo z27.d, z10.s
; CHECK-NEXT:    uunpkhi z10.d, z10.s
; CHECK-NEXT:    uunpklo z12.d, z8.s
; CHECK-NEXT:    uunpkhi z16.d, z8.s
; CHECK-NEXT:    movprfx z5, z7
; CHECK-NEXT:    frintx z5.h, p0/m, z7.h
; CHECK-NEXT:    movprfx z1, z4
; CHECK-NEXT:    frintx z1.h, p0/m, z4.h
; CHECK-NEXT:    frintx z0.h, p0/m, z0.h
; CHECK-NEXT:    movprfx z6, z24
; CHECK-NEXT:    frintx z6.h, p0/m, z24.h
; CHECK-NEXT:    movprfx z24, z25
; CHECK-NEXT:    frintx z24.h, p0/m, z25.h
; CHECK-NEXT:    movprfx z25, z26
; CHECK-NEXT:    frintx z25.h, p0/m, z26.h
; CHECK-NEXT:    movprfx z28, z27
; CHECK-NEXT:    frintx z28.h, p0/m, z27.h
; CHECK-NEXT:    movprfx z8, z10
; CHECK-NEXT:    frintx z8.h, p0/m, z10.h
; CHECK-NEXT:    mov z7.h, w9
; CHECK-NEXT:    mov z4.d, #0x7fffffffffffffff
; CHECK-NEXT:    rdvl x9, #15
; CHECK-NEXT:    fcmge p3.h, p0/z, z5.h, z30.h
; CHECK-NEXT:    movprfx z11, z5
; CHECK-NEXT:    fcvtzs z11.d, p0/m, z5.h
; CHECK-NEXT:    fcmge p2.h, p0/z, z1.h, z30.h
; CHECK-NEXT:    fcmge p1.h, p0/z, z0.h, z30.h
; CHECK-NEXT:    fcmge p4.h, p0/z, z6.h, z30.h
; CHECK-NEXT:    movprfx z9, z6
; CHECK-NEXT:    fcvtzs z9.d, p0/m, z6.h
; CHECK-NEXT:    movprfx z15, z25
; CHECK-NEXT:    fcvtzs z15.d, p0/m, z25.h
; CHECK-NEXT:    movprfx z14, z24
; CHECK-NEXT:    fcvtzs z14.d, p0/m, z24.h
; CHECK-NEXT:    movprfx z26, z0
; CHECK-NEXT:    fcvtzs z26.d, p0/m, z0.h
; CHECK-NEXT:    movprfx z19, z28
; CHECK-NEXT:    fcvtzs z19.d, p0/m, z28.h
; CHECK-NEXT:    movprfx z31, z1
; CHECK-NEXT:    fcvtzs z31.d, p0/m, z1.h
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    not p6.b, p0/z, p2.b
; CHECK-NEXT:    fcmge p2.h, p0/z, z25.h, z30.h
; CHECK-NEXT:    sel z27.d, p3, z29.d, z11.d
; CHECK-NEXT:    uunpkhi z11.s, z2.h
; CHECK-NEXT:    not p5.b, p0/z, p1.b
; CHECK-NEXT:    fcmge p1.h, p0/z, z24.h, z30.h
; CHECK-NEXT:    not p3.b, p0/z, p4.b
; CHECK-NEXT:    fcmge p4.h, p0/z, z28.h, z30.h
; CHECK-NEXT:    mov z26.d, p5/m, z29.d
; CHECK-NEXT:    mov z31.d, p6/m, z29.d
; CHECK-NEXT:    sel z2.d, p3, z29.d, z9.d
; CHECK-NEXT:    movprfx z9, z12
; CHECK-NEXT:    frintx z9.h, p0/m, z12.h
; CHECK-NEXT:    uunpkhi z12.d, z13.s
; CHECK-NEXT:    uunpklo z17.d, z11.s
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    sel z3.d, p2, z29.d, z15.d
; CHECK-NEXT:    uunpklo z15.d, z13.s
; CHECK-NEXT:    fcmge p2.h, p0/z, z8.h, z30.h
; CHECK-NEXT:    sel z10.d, p1, z29.d, z14.d
; CHECK-NEXT:    movprfx z14, z16
; CHECK-NEXT:    frintx z14.h, p0/m, z16.h
; CHECK-NEXT:    uunpkhi z16.d, z18.s
; CHECK-NEXT:    movprfx z13, z17
; CHECK-NEXT:    frintx z13.h, p0/m, z17.h
; CHECK-NEXT:    movprfx z20, z12
; CHECK-NEXT:    frintx z20.h, p0/m, z12.h
; CHECK-NEXT:    fcmge p3.h, p0/z, z9.h, z30.h
; CHECK-NEXT:    uunpkhi z17.d, z11.s
; CHECK-NEXT:    uunpklo z18.d, z18.s
; CHECK-NEXT:    movprfx z12, z8
; CHECK-NEXT:    fcvtzs z12.d, p0/m, z8.h
; CHECK-NEXT:    movprfx z21, z15
; CHECK-NEXT:    frintx z21.h, p0/m, z15.h
; CHECK-NEXT:    not p1.b, p0/z, p4.b
; CHECK-NEXT:    movprfx z15, z9
; CHECK-NEXT:    fcvtzs z15.d, p0/m, z9.h
; CHECK-NEXT:    frintx z16.h, p0/m, z16.h
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    movprfx z22, z14
; CHECK-NEXT:    fcvtzs z22.d, p0/m, z14.h
; CHECK-NEXT:    fcmge p4.h, p0/z, z13.h, z30.h
; CHECK-NEXT:    fcmge p5.h, p0/z, z20.h, z30.h
; CHECK-NEXT:    sel z11.d, p1, z29.d, z19.d
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    frintx z17.h, p0/m, z17.h
; CHECK-NEXT:    frintx z18.h, p0/m, z18.h
; CHECK-NEXT:    movprfx z19, z20
; CHECK-NEXT:    fcvtzs z19.d, p0/m, z20.h
; CHECK-NEXT:    mov z12.d, p2/m, z29.d
; CHECK-NEXT:    fcmge p2.h, p0/z, z21.h, z30.h
; CHECK-NEXT:    fcmge p1.h, p0/z, z14.h, z30.h
; CHECK-NEXT:    mov z15.d, p3/m, z29.d
; CHECK-NEXT:    movprfx z23, z21
; CHECK-NEXT:    fcvtzs z23.d, p0/m, z21.h
; CHECK-NEXT:    not p3.b, p0/z, p4.b
; CHECK-NEXT:    fcmge p4.h, p0/z, z16.h, z30.h
; CHECK-NEXT:    fcmgt p8.h, p0/z, z21.h, z7.h
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    fcmge p6.h, p0/z, z17.h, z30.h
; CHECK-NEXT:    fcmge p7.h, p0/z, z18.h, z30.h
; CHECK-NEXT:    movprfx z30, z16
; CHECK-NEXT:    fcvtzs z30.d, p0/m, z16.h
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    fcmuo p9.h, p0/z, z21.h, z21.h
; CHECK-NEXT:    mov z19.d, p5/m, z29.d
; CHECK-NEXT:    fcmgt p5.h, p0/z, z20.h, z7.h
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    mov z23.d, p2/m, z29.d
; CHECK-NEXT:    fcmuo p2.h, p0/z, z20.h, z20.h
; CHECK-NEXT:    movprfx z20, z18
; CHECK-NEXT:    fcvtzs z20.d, p0/m, z18.h
; CHECK-NEXT:    movprfx z21, z13
; CHECK-NEXT:    fcvtzs z21.d, p0/m, z13.h
; CHECK-NEXT:    mov z22.d, p1/m, z29.d
; CHECK-NEXT:    not p1.b, p0/z, p7.b
; CHECK-NEXT:    mov z30.d, p4/m, z29.d
; CHECK-NEXT:    fcmgt p4.h, p0/z, z18.h, z7.h
; CHECK-NEXT:    mov z19.d, p5/m, z4.d
; CHECK-NEXT:    fcmuo p7.h, p0/z, z18.h, z18.h
; CHECK-NEXT:    movprfx z18, z17
; CHECK-NEXT:    fcvtzs z18.d, p0/m, z17.h
; CHECK-NEXT:    fcmgt p5.h, p0/z, z16.h, z7.h
; CHECK-NEXT:    not p6.b, p0/z, p6.b
; CHECK-NEXT:    mov z23.d, p8/m, z4.d
; CHECK-NEXT:    mov z20.d, p1/m, z29.d
; CHECK-NEXT:    mov z21.d, p3/m, z29.d
; CHECK-NEXT:    fcmuo p3.h, p0/z, z16.h, z16.h
; CHECK-NEXT:    mov z19.d, p2/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p2.h, p0/z, z17.h, z7.h
; CHECK-NEXT:    ptrue p1.b
; CHECK-NEXT:    sel z29.d, p6, z29.d, z18.d
; CHECK-NEXT:    mov z23.d, p9/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p6.h, p0/z, z14.h, z7.h
; CHECK-NEXT:    mov z30.d, p5/m, z4.d
; CHECK-NEXT:    sel z16.d, p4, z4.d, z20.d
; CHECK-NEXT:    fcmuo p4.h, p0/z, z17.h, z17.h
; CHECK-NEXT:    st1b { z19.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #14
; CHECK-NEXT:    fcmgt p5.h, p0/z, z1.h, z7.h
; CHECK-NEXT:    st1b { z23.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #13
; CHECK-NEXT:    mov z29.d, p2/m, z4.d
; CHECK-NEXT:    mov z30.d, p3/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p3.h, p0/z, z13.h, z7.h
; CHECK-NEXT:    mov z16.d, p7/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p2.h, p0/z, z9.h, z7.h
; CHECK-NEXT:    fcmuo p7.h, p0/z, z14.h, z14.h
; CHECK-NEXT:    mov z29.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p4.h, p0/z, z13.h, z13.h
; CHECK-NEXT:    st1b { z30.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #12
; CHECK-NEXT:    sel z30.d, p5, z4.d, z31.d
; CHECK-NEXT:    st1b { z16.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #11
; CHECK-NEXT:    sel z31.d, p3, z4.d, z21.d
; CHECK-NEXT:    st1b { z29.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #10
; CHECK-NEXT:    fcmgt p5.h, p0/z, z24.h, z7.h
; CHECK-NEXT:    fcmgt p3.h, p0/z, z28.h, z7.h
; CHECK-NEXT:    sel z13.d, p2, z4.d, z15.d
; CHECK-NEXT:    fcmuo p2.h, p0/z, z9.h, z9.h
; CHECK-NEXT:    sel z29.d, p6, z4.d, z22.d
; CHECK-NEXT:    mov z31.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p4.h, p0/z, z8.h, z7.h
; CHECK-NEXT:    fcmgt p6.h, p0/z, z5.h, z7.h
; CHECK-NEXT:    sel z9.d, p5, z4.d, z10.d
; CHECK-NEXT:    fcmgt p5.h, p0/z, z6.h, z7.h
; CHECK-NEXT:    st1b { z31.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #9
; CHECK-NEXT:    mov z29.d, p7/m, #0 // =0x0
; CHECK-NEXT:    sel z10.d, p3, z4.d, z11.d
; CHECK-NEXT:    fcmgt p3.h, p0/z, z25.h, z7.h
; CHECK-NEXT:    mov z13.d, p2/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p7.h, p0/z, z8.h, z8.h
; CHECK-NEXT:    fcmuo p2.h, p0/z, z28.h, z28.h
; CHECK-NEXT:    sel z28.d, p4, z4.d, z12.d
; CHECK-NEXT:    st1b { z29.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #8
; CHECK-NEXT:    fcmuo p4.h, p0/z, z25.h, z25.h
; CHECK-NEXT:    st1b { z13.b }, p1, [x8, x9]
; CHECK-NEXT:    fcmuo p1.h, p0/z, z24.h, z24.h
; CHECK-NEXT:    mov z2.d, p5/m, z4.d
; CHECK-NEXT:    mov z3.d, p3/m, z4.d
; CHECK-NEXT:    fcmgt p3.h, p0/z, z0.h, z7.h
; CHECK-NEXT:    mov z28.d, p7/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p7.h, p0/z, z6.h, z6.h
; CHECK-NEXT:    mov z10.d, p2/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p2.h, p0/z, z5.h, z5.h
; CHECK-NEXT:    sel z5.d, p6, z4.d, z27.d
; CHECK-NEXT:    mov z3.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p4.h, p0/z, z1.h, z1.h
; CHECK-NEXT:    mov z9.d, p1/m, #0 // =0x0
; CHECK-NEXT:    st1d { z28.d }, p0, [x8, #7, mul vl]
; CHECK-NEXT:    fcmuo p1.h, p0/z, z0.h, z0.h
; CHECK-NEXT:    sel z0.d, p3, z4.d, z26.d
; CHECK-NEXT:    st1d { z10.d }, p0, [x8, #6, mul vl]
; CHECK-NEXT:    mov z2.d, p7/m, #0 // =0x0
; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #5, mul vl]
; CHECK-NEXT:    mov z5.d, p2/m, #0 // =0x0
; CHECK-NEXT:    st1d { z9.d }, p0, [x8, #4, mul vl]
; CHECK-NEXT:    mov z30.d, p4/m, #0 // =0x0
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #3, mul vl]
; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #2, mul vl]
; CHECK-NEXT:    st1d { z30.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #17
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half> %x)
  ret <vscale x 32 x i64> %a
}
declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f16(<vscale x 32 x half>)
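
; The f32 variants follow the same pattern with single-precision bounds:
; 0xdf000000 is exactly -2^63 and 0x5effffff is the largest float below 2^63
; (2^63 - 2^39), so only values outside the i64 range take the clamp paths.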

define <vscale x 1 x i64> @llrint_v1i64_v1f32(<vscale x 1 x float> %x) {
; CHECK-LABEL: llrint_v1i64_v1f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
; CHECK-NEXT:    mov z3.s, w8
; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    movprfx z1, z0
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
; CHECK-NEXT:    mov z1.d, p1/m, z2.d
; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>)

define <vscale x 2 x i64> @llrint_v2i64_v2f32(<vscale x 2 x float> %x) {
; CHECK-LABEL: llrint_v2i64_v2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    mov z1.s, w8
; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
; CHECK-NEXT:    mov z3.s, w8
; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z1.s
; CHECK-NEXT:    movprfx z1, z0
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.s
; CHECK-NEXT:    fcmgt p2.s, p0/z, z0.s, z3.s
; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
; CHECK-NEXT:    mov z1.d, p1/m, z2.d
; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float>)

define <vscale x 4 x i64> @llrint_v4i64_v4f32(<vscale x 4 x float> %x) {
; CHECK-LABEL: llrint_v4i64_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    uunpklo z1.d, z0.s
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z2.s, w8
; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
; CHECK-NEXT:    mov z3.s, w8
; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
; CHECK-NEXT:    fcmge p1.s, p0/z, z1.s, z2.s
; CHECK-NEXT:    fcmge p2.s, p0/z, z0.s, z2.s
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    movprfx z4, z1
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z1.s
; CHECK-NEXT:    movprfx z5, z0
; CHECK-NEXT:    fcvtzs z5.d, p0/m, z0.s
; CHECK-NEXT:    fcmgt p3.s, p0/z, z1.s, z3.s
; CHECK-NEXT:    fcmgt p4.s, p0/z, z0.s, z3.s
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
; CHECK-NEXT:    fcmuo p1.s, p0/z, z1.s, z1.s
; CHECK-NEXT:    fcmuo p0.s, p0/z, z0.s, z0.s
; CHECK-NEXT:    sel z2.d, p2, z2.d, z5.d
; CHECK-NEXT:    sel z0.d, p3, z6.d, z3.d
; CHECK-NEXT:    sel z1.d, p4, z6.d, z2.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    mov z1.d, p0/m, #0 // =0x0
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float>)

define <vscale x 8 x i64> @llrint_v8i64_v8f32(<vscale x 8 x float> %x) {
; CHECK-LABEL: llrint_v8i64_v8f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    uunpklo z2.d, z0.s
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
; CHECK-NEXT:    uunpklo z3.d, z1.s
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    mov z4.s, w8
; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
; CHECK-NEXT:    mov z5.d, #0x8000000000000000
; CHECK-NEXT:    mov z6.s, w8
; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
; CHECK-NEXT:    frintx z2.s, p0/m, z2.s
; CHECK-NEXT:    frintx z0.s, p0/m, z0.s
; CHECK-NEXT:    frintx z3.s, p0/m, z3.s
; CHECK-NEXT:    frintx z1.s, p0/m, z1.s
; CHECK-NEXT:    fcmge p1.s, p0/z, z2.s, z4.s
; CHECK-NEXT:    fcmge p2.s, p0/z, z0.s, z4.s
; CHECK-NEXT:    movprfx z7, z0
; CHECK-NEXT:    fcvtzs z7.d, p0/m, z0.s
; CHECK-NEXT:    fcmge p3.s, p0/z, z3.s, z4.s
; CHECK-NEXT:    fcmge p4.s, p0/z, z1.s, z4.s
; CHECK-NEXT:    movprfx z4, z2
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z2.s
; CHECK-NEXT:    movprfx z24, z3
; CHECK-NEXT:    fcvtzs z24.d, p0/m, z3.s
; CHECK-NEXT:    movprfx z25, z1
; CHECK-NEXT:    fcvtzs z25.d, p0/m, z1.s
; CHECK-NEXT:    fcmgt p7.s, p0/z, z3.s, z6.s
; CHECK-NEXT:    fcmgt p5.s, p0/z, z2.s, z6.s
; CHECK-NEXT:    fcmgt p6.s, p0/z, z0.s, z6.s
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    mov z4.d, p1/m, z5.d
; CHECK-NEXT:    fcmgt p1.s, p0/z, z1.s, z6.s
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    sel z6.d, p2, z5.d, z7.d
; CHECK-NEXT:    fcmuo p2.s, p0/z, z2.s, z2.s
; CHECK-NEXT:    sel z7.d, p3, z5.d, z24.d
; CHECK-NEXT:    fcmuo p3.s, p0/z, z0.s, z0.s
; CHECK-NEXT:    sel z5.d, p4, z5.d, z25.d
; CHECK-NEXT:    fcmuo p4.s, p0/z, z3.s, z3.s
; CHECK-NEXT:    fcmuo p0.s, p0/z, z1.s, z1.s
; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z3.d, p1, z26.d, z5.d
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z3.d, p0/m, #0 // =0x0
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
  ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>)

define <vscale x 16 x i64> @llrint_v16i64_v16f32(<vscale x 16 x float> %x) {
; CHECK-LABEL: llrint_v16i64_v16f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-2
; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    uunpklo z4.d, z0.s
; CHECK-NEXT:    uunpkhi z0.d, z0.s
; CHECK-NEXT:    mov w8, #-553648128 // =0xdf000000
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uunpklo z7.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    uunpklo z24.d, z2.s
; CHECK-NEXT:    uunpkhi z2.d, z2.s
; CHECK-NEXT:    uunpklo z25.d, z3.s
; CHECK-NEXT:    uunpkhi z3.d, z3.s
; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
; CHECK-NEXT:    movprfx z5, z4
; CHECK-NEXT:    frintx z5.s, p0/m, z4.s
; CHECK-NEXT:    movprfx z6, z0
; CHECK-NEXT:    frintx z6.s, p0/m, z0.s
; CHECK-NEXT:    mov z4.s, w8
; CHECK-NEXT:    frintx z7.s, p0/m, z7.s
; CHECK-NEXT:    movprfx z28, z1
; CHECK-NEXT:    frintx z28.s, p0/m, z1.s
; CHECK-NEXT:    mov w8, #1593835519 // =0x5effffff
; CHECK-NEXT:    mov z0.d, #0x8000000000000000
; CHECK-NEXT:    frintx z24.s, p0/m, z24.s
; CHECK-NEXT:    movprfx z29, z2
; CHECK-NEXT:    frintx z29.s, p0/m, z2.s
; CHECK-NEXT:    frintx z25.s, p0/m, z25.s
; CHECK-NEXT:    movprfx z30, z3
; CHECK-NEXT:    frintx z30.s, p0/m, z3.s
; CHECK-NEXT:    mov z27.s, w8
; CHECK-NEXT:    fcmge p1.s, p0/z, z5.s, z4.s
; CHECK-NEXT:    fcmge p2.s, p0/z, z6.s, z4.s
; CHECK-NEXT:    movprfx z1, z5
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z5.s
; CHECK-NEXT:    movprfx z2, z6
; CHECK-NEXT:    fcvtzs z2.d, p0/m, z6.s
; CHECK-NEXT:    fcmge p5.s, p0/z, z7.s, z4.s
; CHECK-NEXT:    fcmge p6.s, p0/z, z28.s, z4.s
; CHECK-NEXT:    movprfx z3, z7
; CHECK-NEXT:    fcvtzs z3.d, p0/m, z7.s
; CHECK-NEXT:    fcmge p8.s, p0/z, z29.s, z4.s
; CHECK-NEXT:    fcmgt p3.s, p0/z, z5.s, z27.s
; CHECK-NEXT:    fcmgt p7.s, p0/z, z6.s, z27.s
; CHECK-NEXT:    fcmge p9.s, p0/z, z25.s, z4.s
; CHECK-NEXT:    movprfx z31, z25
; CHECK-NEXT:    fcvtzs z31.d, p0/m, z25.s
; CHECK-NEXT:    not p4.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p1.s, p0/z, z5.s, z5.s
; CHECK-NEXT:    movprfx z5, z28
; CHECK-NEXT:    fcvtzs z5.d, p0/m, z28.s
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    fcmge p10.s, p0/z, z30.s, z4.s
; CHECK-NEXT:    movprfx z8, z30
; CHECK-NEXT:    fcvtzs z8.d, p0/m, z30.s
; CHECK-NEXT:    mov z1.d, p4/m, z0.d
; CHECK-NEXT:    fcmge p4.s, p0/z, z24.s, z4.s
; CHECK-NEXT:    movprfx z4, z29
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z29.s
; CHECK-NEXT:    mov z2.d, p2/m, z0.d
; CHECK-NEXT:    fcmuo p2.s, p0/z, z6.s, z6.s
; CHECK-NEXT:    movprfx z6, z24
; CHECK-NEXT:    fcvtzs z6.d, p0/m, z24.s
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    not p6.b, p0/z, p6.b
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    mov z3.d, p5/m, z0.d
; CHECK-NEXT:    not p5.b, p0/z, p8.b
; CHECK-NEXT:    mov z5.d, p6/m, z0.d
; CHECK-NEXT:    fcmgt p8.s, p0/z, z7.s, z27.s
; CHECK-NEXT:    not p6.b, p0/z, p9.b
; CHECK-NEXT:    mov z6.d, p4/m, z0.d
; CHECK-NEXT:    fcmuo p9.s, p0/z, z7.s, z7.s
; CHECK-NEXT:    not p4.b, p0/z, p10.b
; CHECK-NEXT:    fcmgt p10.s, p0/z, z28.s, z27.s
; CHECK-NEXT:    sel z7.d, p5, z0.d, z4.d
; CHECK-NEXT:    fcmgt p5.s, p0/z, z24.s, z27.s
; CHECK-NEXT:    mov z31.d, p6/m, z0.d
; CHECK-NEXT:    fcmgt p6.s, p0/z, z30.s, z27.s
; CHECK-NEXT:    mov z8.d, p4/m, z0.d
; CHECK-NEXT:    sel z0.d, p3, z26.d, z1.d
; CHECK-NEXT:    fcmgt p3.s, p0/z, z29.s, z27.s
; CHECK-NEXT:    fcmgt p4.s, p0/z, z25.s, z27.s
; CHECK-NEXT:    sel z1.d, p7, z26.d, z2.d
; CHECK-NEXT:    fcmuo p7.s, p0/z, z28.s, z28.s
; CHECK-NEXT:    sel z2.d, p8, z26.d, z3.d
; CHECK-NEXT:    sel z3.d, p10, z26.d, z5.d
; CHECK-NEXT:    fcmuo p8.s, p0/z, z29.s, z29.s
; CHECK-NEXT:    sel z4.d, p5, z26.d, z6.d
; CHECK-NEXT:    fcmuo p5.s, p0/z, z24.s, z24.s
; CHECK-NEXT:    fcmuo p10.s, p0/z, z25.s, z25.s
; CHECK-NEXT:    sel z5.d, p3, z26.d, z7.d
; CHECK-NEXT:    fcmuo p0.s, p0/z, z30.s, z30.s
; CHECK-NEXT:    sel z7.d, p6, z26.d, z8.d
; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z6.d, p4, z26.d, z31.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
; CHECK-NEXT:    mov z3.d, p7/m, #0 // =0x0
; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #2
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
  ret <vscale x 16 x i64> %a
}
declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float>)

define <vscale x 32 x i64> @llrint_v32i64_v32f32(<vscale x 32 x float> %x) {
; CHECK-LABEL: llrint_v32i64_v32f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-17
; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z23, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z22, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z21, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z20, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z19, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z18, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z17, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z16, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z15, [sp, #9, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z14, [sp, #10, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z13, [sp, #11, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #12, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #13, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z10, [sp, #14, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z9, [sp, #15, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z8, [sp, #16, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x88, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 136 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
; CHECK-NEXT:    uunpklo z24.d, z0.s
; CHECK-NEXT:    uunpkhi z25.d, z0.s
; CHECK-NEXT:    mov w9, #-553648128 // =0xdf000000
; CHECK-NEXT:    uunpklo z26.d, z1.s
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uunpkhi z27.d, z1.s
; CHECK-NEXT:    mov z31.s, w9
; CHECK-NEXT:    mov w9, #1593835519 // =0x5effffff
; CHECK-NEXT:    uunpklo z28.d, z2.s
; CHECK-NEXT:    mov z8.d, #0x8000000000000000
; CHECK-NEXT:    uunpklo z30.d, z3.s
; CHECK-NEXT:    uunpklo z13.d, z4.s
; CHECK-NEXT:    movprfx z0, z24
; CHECK-NEXT:    frintx z0.s, p0/m, z24.s
; CHECK-NEXT:    movprfx z1, z25
; CHECK-NEXT:    frintx z1.s, p0/m, z25.s
; CHECK-NEXT:    uunpkhi z15.d, z4.s
; CHECK-NEXT:    movprfx z24, z26
; CHECK-NEXT:    frintx z24.s, p0/m, z26.s
; CHECK-NEXT:    uunpkhi z26.d, z2.s
; CHECK-NEXT:    movprfx z25, z27
; CHECK-NEXT:    frintx z25.s, p0/m, z27.s
; CHECK-NEXT:    movprfx z27, z28
; CHECK-NEXT:    frintx z27.s, p0/m, z28.s
; CHECK-NEXT:    uunpklo z16.d, z5.s
; CHECK-NEXT:    uunpkhi z17.d, z7.s
; CHECK-NEXT:    frintx z30.s, p0/m, z30.s
; CHECK-NEXT:    uunpklo z18.d, z7.s
; CHECK-NEXT:    uunpklo z21.d, z6.s
; CHECK-NEXT:    fcmge p1.s, p0/z, z0.s, z31.s
; CHECK-NEXT:    movprfx z9, z0
; CHECK-NEXT:    fcvtzs z9.d, p0/m, z0.s
; CHECK-NEXT:    movprfx z10, z1
; CHECK-NEXT:    fcvtzs z10.d, p0/m, z1.s
; CHECK-NEXT:    fcmge p2.s, p0/z, z1.s, z31.s
; CHECK-NEXT:    fcmge p3.s, p0/z, z24.s, z31.s
; CHECK-NEXT:    movprfx z11, z24
; CHECK-NEXT:    fcvtzs z11.d, p0/m, z24.s
; CHECK-NEXT:    movprfx z29, z26
; CHECK-NEXT:    frintx z29.s, p0/m, z26.s
; CHECK-NEXT:    fcmge p4.s, p0/z, z25.s, z31.s
; CHECK-NEXT:    fcmge p5.s, p0/z, z27.s, z31.s
; CHECK-NEXT:    movprfx z12, z27
; CHECK-NEXT:    fcvtzs z12.d, p0/m, z27.s
; CHECK-NEXT:    movprfx z19, z30
; CHECK-NEXT:    fcvtzs z19.d, p0/m, z30.s
; CHECK-NEXT:    movprfx z7, z16
; CHECK-NEXT:    frintx z7.s, p0/m, z16.s
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    frintx z17.s, p0/m, z17.s
; CHECK-NEXT:    uunpkhi z16.d, z5.s
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    frintx z18.s, p0/m, z18.s
; CHECK-NEXT:    mov z28.s, w9
; CHECK-NEXT:    not p6.b, p0/z, p3.b
; CHECK-NEXT:    sel z26.d, p1, z8.d, z9.d
; CHECK-NEXT:    movprfx z14, z29
; CHECK-NEXT:    fcvtzs z14.d, p0/m, z29.s
; CHECK-NEXT:    sel z9.d, p2, z8.d, z10.d
; CHECK-NEXT:    uunpkhi z10.d, z3.s
; CHECK-NEXT:    rdvl x9, #15
; CHECK-NEXT:    sel z3.d, p6, z8.d, z11.d
; CHECK-NEXT:    movprfx z11, z25
; CHECK-NEXT:    fcvtzs z11.d, p0/m, z25.s
; CHECK-NEXT:    fcmge p3.s, p0/z, z29.s, z31.s
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    fcmge p1.s, p0/z, z30.s, z31.s
; CHECK-NEXT:    movprfx z23, z18
; CHECK-NEXT:    fcvtzs z23.d, p0/m, z18.s
; CHECK-NEXT:    not p2.b, p0/z, p5.b
; CHECK-NEXT:    fcmge p5.s, p0/z, z17.s, z31.s
; CHECK-NEXT:    frintx z16.s, p0/m, z16.s
; CHECK-NEXT:    frintx z10.s, p0/m, z10.s
; CHECK-NEXT:    mov z2.d, #0x7fffffffffffffff
; CHECK-NEXT:    fcmgt p8.s, p0/z, z18.s, z28.s
; CHECK-NEXT:    sel z4.d, p4, z8.d, z11.d
; CHECK-NEXT:    movprfx z11, z13
; CHECK-NEXT:    frintx z11.s, p0/m, z13.s
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    sel z13.d, p2, z8.d, z12.d
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    fcmge p4.s, p0/z, z7.s, z31.s
; CHECK-NEXT:    sel z12.d, p3, z8.d, z14.d
; CHECK-NEXT:    movprfx z14, z15
; CHECK-NEXT:    frintx z14.s, p0/m, z15.s
; CHECK-NEXT:    uunpkhi z15.d, z6.s
; CHECK-NEXT:    movprfx z20, z10
; CHECK-NEXT:    fcvtzs z20.d, p0/m, z10.s
; CHECK-NEXT:    fcmge p2.s, p0/z, z10.s, z31.s
; CHECK-NEXT:    sel z5.d, p1, z8.d, z19.d
; CHECK-NEXT:    movprfx z19, z11
; CHECK-NEXT:    fcvtzs z19.d, p0/m, z11.s
; CHECK-NEXT:    fcmge p3.s, p0/z, z11.s, z31.s
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    fcmge p6.s, p0/z, z16.s, z31.s
; CHECK-NEXT:    fcmuo p9.s, p0/z, z18.s, z18.s
; CHECK-NEXT:    movprfx z22, z15
; CHECK-NEXT:    frintx z22.s, p0/m, z15.s
; CHECK-NEXT:    fcmge p1.s, p0/z, z14.s, z31.s
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    sel z6.d, p2, z8.d, z20.d
; CHECK-NEXT:    movprfx z20, z21
; CHECK-NEXT:    frintx z20.s, p0/m, z21.s
; CHECK-NEXT:    fcmge p2.s, p0/z, z18.s, z31.s
; CHECK-NEXT:    sel z15.d, p3, z8.d, z19.d
; CHECK-NEXT:    movprfx z19, z17
; CHECK-NEXT:    fcvtzs z19.d, p0/m, z17.s
; CHECK-NEXT:    not p3.b, p0/z, p4.b
; CHECK-NEXT:    fcmge p4.s, p0/z, z22.s, z31.s
; CHECK-NEXT:    movprfx z21, z14
; CHECK-NEXT:    fcvtzs z21.d, p0/m, z14.s
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    movprfx z18, z7
; CHECK-NEXT:    fcvtzs z18.d, p0/m, z7.s
; CHECK-NEXT:    not p6.b, p0/z, p6.b
; CHECK-NEXT:    fcmge p7.s, p0/z, z20.s, z31.s
; CHECK-NEXT:    movprfx z31, z22
; CHECK-NEXT:    fcvtzs z31.d, p0/m, z22.s
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    mov z19.d, p5/m, z8.d
; CHECK-NEXT:    fcmgt p5.s, p0/z, z17.s, z28.s
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    mov z23.d, p2/m, z8.d
; CHECK-NEXT:    fcmuo p2.s, p0/z, z17.s, z17.s
; CHECK-NEXT:    movprfx z17, z20
; CHECK-NEXT:    fcvtzs z17.d, p0/m, z20.s
; CHECK-NEXT:    mov z21.d, p1/m, z8.d
; CHECK-NEXT:    mov z18.d, p3/m, z8.d
; CHECK-NEXT:    not p1.b, p0/z, p7.b
; CHECK-NEXT:    mov z31.d, p4/m, z8.d
; CHECK-NEXT:    fcmgt p4.s, p0/z, z20.s, z28.s
; CHECK-NEXT:    mov z19.d, p5/m, z2.d
; CHECK-NEXT:    fcmuo p7.s, p0/z, z20.s, z20.s
; CHECK-NEXT:    movprfx z20, z16
; CHECK-NEXT:    fcvtzs z20.d, p0/m, z16.s
; CHECK-NEXT:    fcmgt p5.s, p0/z, z22.s, z28.s
; CHECK-NEXT:    mov z23.d, p8/m, z2.d
; CHECK-NEXT:    fcmuo p3.s, p0/z, z22.s, z22.s
; CHECK-NEXT:    mov z17.d, p1/m, z8.d
; CHECK-NEXT:    ptrue p1.b
; CHECK-NEXT:    mov z19.d, p2/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p2.s, p0/z, z16.s, z28.s
; CHECK-NEXT:    sel z8.d, p6, z8.d, z20.d
; CHECK-NEXT:    mov z23.d, p9/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p6.s, p0/z, z14.s, z28.s
; CHECK-NEXT:    mov z31.d, p5/m, z2.d
; CHECK-NEXT:    mov z17.d, p4/m, z2.d
; CHECK-NEXT:    fcmuo p4.s, p0/z, z16.s, z16.s
; CHECK-NEXT:    st1b { z19.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #14
; CHECK-NEXT:    fcmgt p5.s, p0/z, z1.s, z28.s
; CHECK-NEXT:    st1b { z23.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #13
; CHECK-NEXT:    mov z8.d, p2/m, z2.d
; CHECK-NEXT:    mov z31.d, p3/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p3.s, p0/z, z7.s, z28.s
; CHECK-NEXT:    mov z17.d, p7/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p2.s, p0/z, z11.s, z28.s
; CHECK-NEXT:    fcmuo p7.s, p0/z, z14.s, z14.s
; CHECK-NEXT:    mov z8.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p4.s, p0/z, z7.s, z7.s
; CHECK-NEXT:    sel z7.d, p5, z2.d, z9.d
; CHECK-NEXT:    st1b { z31.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #12
; CHECK-NEXT:    fcmgt p5.s, p0/z, z27.s, z28.s
; CHECK-NEXT:    st1b { z17.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #11
; CHECK-NEXT:    sel z31.d, p3, z2.d, z18.d
; CHECK-NEXT:    st1b { z8.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #10
; CHECK-NEXT:    fcmgt p3.s, p0/z, z30.s, z28.s
; CHECK-NEXT:    sel z9.d, p2, z2.d, z15.d
; CHECK-NEXT:    fcmuo p2.s, p0/z, z11.s, z11.s
; CHECK-NEXT:    sel z8.d, p6, z2.d, z21.d
; CHECK-NEXT:    mov z31.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmgt p4.s, p0/z, z10.s, z28.s
; CHECK-NEXT:    fcmgt p6.s, p0/z, z24.s, z28.s
; CHECK-NEXT:    sel z11.d, p5, z2.d, z13.d
; CHECK-NEXT:    fcmgt p5.s, p0/z, z25.s, z28.s
; CHECK-NEXT:    mov z8.d, p7/m, #0 // =0x0
; CHECK-NEXT:    mov z5.d, p3/m, z2.d
; CHECK-NEXT:    fcmgt p3.s, p0/z, z29.s, z28.s
; CHECK-NEXT:    st1b { z31.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #9
; CHECK-NEXT:    mov z9.d, p2/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p7.s, p0/z, z10.s, z10.s
; CHECK-NEXT:    fcmuo p2.s, p0/z, z30.s, z30.s
; CHECK-NEXT:    mov z6.d, p4/m, z2.d
; CHECK-NEXT:    st1b { z8.b }, p1, [x8, x9]
; CHECK-NEXT:    rdvl x9, #8
; CHECK-NEXT:    fcmuo p4.s, p0/z, z29.s, z29.s
; CHECK-NEXT:    st1b { z9.b }, p1, [x8, x9]
; CHECK-NEXT:    fcmuo p1.s, p0/z, z27.s, z27.s
; CHECK-NEXT:    sel z27.d, p3, z2.d, z12.d
; CHECK-NEXT:    fcmgt p3.s, p0/z, z0.s, z28.s
; CHECK-NEXT:    mov z4.d, p5/m, z2.d
; CHECK-NEXT:    mov z3.d, p6/m, z2.d
; CHECK-NEXT:    mov z6.d, p7/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p7.s, p0/z, z25.s, z25.s
; CHECK-NEXT:    mov z5.d, p2/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p2.s, p0/z, z24.s, z24.s
; CHECK-NEXT:    mov z27.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p4.s, p0/z, z1.s, z1.s
; CHECK-NEXT:    mov z11.d, p1/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p1.s, p0/z, z0.s, z0.s
; CHECK-NEXT:    st1d { z6.d }, p0, [x8, #7, mul vl]
; CHECK-NEXT:    sel z0.d, p3, z2.d, z26.d
; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #6, mul vl]
; CHECK-NEXT:    mov z4.d, p7/m, #0 // =0x0
; CHECK-NEXT:    st1d { z27.d }, p0, [x8, #5, mul vl]
; CHECK-NEXT:    mov z3.d, p2/m, #0 // =0x0
; CHECK-NEXT:    mov z7.d, p4/m, #0 // =0x0
; CHECK-NEXT:    st1d { z11.d }, p0, [x8, #4, mul vl]
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #3, mul vl]
; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #2, mul vl]
; CHECK-NEXT:    st1d { z7.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
; CHECK-NEXT:    ldr z23, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z22, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z21, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z20, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z19, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z18, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z17, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z16, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z15, [sp, #9, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z14, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z13, [sp, #11, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z12, [sp, #12, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #13, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z10, [sp, #14, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #15, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z8, [sp, #16, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #17
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float> %x)
  ret <vscale x 32 x i64> %a
}
declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f32(<vscale x 32 x float>)
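
; The f64 variants use double-precision bounds: 0xc3e0000000000000 is exactly
; -2^63 and 0x43dfffffffffffff is the largest double below 2^63 (2^63 - 2^10).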

define <vscale x 1 x i64> @llrint_v1i64_v1f64(<vscale x 1 x double> %x) {
; CHECK-LABEL: llrint_v1i64_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov x8, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
; CHECK-NEXT:    mov z3.d, x8
; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    movprfx z1, z0
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
; CHECK-NEXT:    mov z1.d, p1/m, z2.d
; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ret
  %a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
  ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double>)

define <vscale x 2 x i64> @llrint_v2i64_v2f64(<vscale x 2 x double> %x) {
; CHECK-LABEL: llrint_v2i64_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov x8, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
; CHECK-NEXT:    mov z3.d, x8
; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z1.d
; CHECK-NEXT:    movprfx z1, z0
; CHECK-NEXT:    fcvtzs z1.d, p0/m, z0.d
; CHECK-NEXT:    fcmgt p2.d, p0/z, z0.d, z3.d
; CHECK-NEXT:    mov z3.d, #0x7fffffffffffffff
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p0.d, p0/z, z0.d, z0.d
; CHECK-NEXT:    mov z1.d, p1/m, z2.d
; CHECK-NEXT:    sel z0.d, p2, z3.d, z1.d
; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ret
  %a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
  ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double>)

define <vscale x 4 x i64> @llrint_v4i64_v4f64(<vscale x 4 x double> %x) {
; CHECK-LABEL: llrint_v4i64_v4f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov x8, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT:    mov z6.d, #0x7fffffffffffffff
; CHECK-NEXT:    mov z2.d, x8
; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
; CHECK-NEXT:    mov z3.d, x8
; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z2.d
; CHECK-NEXT:    fcmge p2.d, p0/z, z1.d, z2.d
; CHECK-NEXT:    mov z2.d, #0x8000000000000000
; CHECK-NEXT:    movprfx z4, z0
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
; CHECK-NEXT:    movprfx z5, z1
; CHECK-NEXT:    fcvtzs z5.d, p0/m, z1.d
; CHECK-NEXT:    fcmgt p3.d, p0/z, z0.d, z3.d
; CHECK-NEXT:    fcmgt p4.d, p0/z, z1.d, z3.d
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    sel z3.d, p1, z2.d, z4.d
; CHECK-NEXT:    fcmuo p1.d, p0/z, z0.d, z0.d
; CHECK-NEXT:    fcmuo p0.d, p0/z, z1.d, z1.d
; CHECK-NEXT:    sel z2.d, p2, z2.d, z5.d
; CHECK-NEXT:    sel z0.d, p3, z6.d, z3.d
; CHECK-NEXT:    sel z1.d, p4, z6.d, z2.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    mov z1.d, p0/m, #0 // =0x0
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
  ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double>)

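; The four-register case below needs more live predicates than the
; caller-saved p0-p3, so the prologue spills p4-p7 into the stack slot
; reserved by "addvl sp, sp, #-1" and reloads them as the final selects
; retire.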
define <vscale x 8 x i64> @llrint_v8i64_v8f64(<vscale x 8 x double> %x) {
; CHECK-LABEL: llrint_v8i64_v8f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 8 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov x8, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT:    mov z5.d, #0x8000000000000000
; CHECK-NEXT:    mov z4.d, x8
; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
; CHECK-NEXT:    mov z26.d, #0x7fffffffffffffff
; CHECK-NEXT:    frintx z0.d, p0/m, z0.d
; CHECK-NEXT:    frintx z1.d, p0/m, z1.d
; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
; CHECK-NEXT:    mov z6.d, x8
; CHECK-NEXT:    fcmge p1.d, p0/z, z0.d, z4.d
; CHECK-NEXT:    fcmge p2.d, p0/z, z1.d, z4.d
; CHECK-NEXT:    fcmge p3.d, p0/z, z2.d, z4.d
; CHECK-NEXT:    fcmge p4.d, p0/z, z3.d, z4.d
; CHECK-NEXT:    movprfx z4, z0
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z0.d
; CHECK-NEXT:    movprfx z7, z1
; CHECK-NEXT:    fcvtzs z7.d, p0/m, z1.d
; CHECK-NEXT:    movprfx z24, z2
; CHECK-NEXT:    fcvtzs z24.d, p0/m, z2.d
; CHECK-NEXT:    movprfx z25, z3
; CHECK-NEXT:    fcvtzs z25.d, p0/m, z3.d
; CHECK-NEXT:    fcmgt p7.d, p0/z, z2.d, z6.d
; CHECK-NEXT:    fcmgt p5.d, p0/z, z0.d, z6.d
; CHECK-NEXT:    fcmgt p6.d, p0/z, z1.d, z6.d
; CHECK-NEXT:    not p1.b, p0/z, p1.b
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    mov z4.d, p1/m, z5.d
; CHECK-NEXT:    fcmgt p1.d, p0/z, z3.d, z6.d
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    sel z6.d, p2, z5.d, z7.d
; CHECK-NEXT:    fcmuo p2.d, p0/z, z0.d, z0.d
; CHECK-NEXT:    sel z7.d, p3, z5.d, z24.d
; CHECK-NEXT:    fcmuo p3.d, p0/z, z1.d, z1.d
; CHECK-NEXT:    sel z5.d, p4, z5.d, z25.d
; CHECK-NEXT:    fcmuo p4.d, p0/z, z2.d, z2.d
; CHECK-NEXT:    fcmuo p0.d, p0/z, z3.d, z3.d
; CHECK-NEXT:    sel z0.d, p5, z26.d, z4.d
; CHECK-NEXT:    sel z1.d, p6, z26.d, z6.d
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z2.d, p7, z26.d, z7.d
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z3.d, p1, z26.d, z5.d
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z0.d, p2/m, #0 // =0x0
; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z3.d, p0/m, #0 // =0x0
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
  ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>)

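; With eight input registers (z0-z7) the expansion below also exhausts the
; caller-saved temporaries z24-z31, so the callee-saved z8 is spilled as well;
; the extra .cfi_escape records where $d8 lives relative to the CFA.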
define <vscale x 16 x i64> @llrint_v16i64_v16f64(<vscale x 16 x double> %x) {
; CHECK-LABEL: llrint_v16i64_v16f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-2
; CHECK-NEXT:    str p10, [sp, #1, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p9, [sp, #2, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z8, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov x8, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT:    mov z24.d, #0x7fffffffffffffff
; CHECK-NEXT:    mov z25.d, x8
; CHECK-NEXT:    mov x8, #4890909195324358655 // =0x43dfffffffffffff
; CHECK-NEXT:    movprfx z26, z0
; CHECK-NEXT:    frintx z26.d, p0/m, z0.d
; CHECK-NEXT:    movprfx z27, z1
; CHECK-NEXT:    frintx z27.d, p0/m, z1.d
; CHECK-NEXT:    frintx z2.d, p0/m, z2.d
; CHECK-NEXT:    mov z0.d, #0x8000000000000000
; CHECK-NEXT:    mov z1.d, x8
; CHECK-NEXT:    frintx z3.d, p0/m, z3.d
; CHECK-NEXT:    movprfx z28, z4
; CHECK-NEXT:    frintx z28.d, p0/m, z4.d
; CHECK-NEXT:    frintx z5.d, p0/m, z5.d
; CHECK-NEXT:    frintx z6.d, p0/m, z6.d
; CHECK-NEXT:    frintx z7.d, p0/m, z7.d
; CHECK-NEXT:    fcmge p1.d, p0/z, z26.d, z25.d
; CHECK-NEXT:    fcmge p2.d, p0/z, z27.d, z25.d
; CHECK-NEXT:    movprfx z4, z26
; CHECK-NEXT:    fcvtzs z4.d, p0/m, z26.d
; CHECK-NEXT:    fcmge p5.d, p0/z, z2.d, z25.d
; CHECK-NEXT:    movprfx z29, z27
; CHECK-NEXT:    fcvtzs z29.d, p0/m, z27.d
; CHECK-NEXT:    fcmgt p3.d, p0/z, z26.d, z1.d
; CHECK-NEXT:    fcmge p6.d, p0/z, z3.d, z25.d
; CHECK-NEXT:    fcmge p8.d, p0/z, z5.d, z25.d
; CHECK-NEXT:    fcmgt p7.d, p0/z, z27.d, z1.d
; CHECK-NEXT:    fcmge p9.d, p0/z, z6.d, z25.d
; CHECK-NEXT:    movprfx z30, z28
; CHECK-NEXT:    fcvtzs z30.d, p0/m, z28.d
; CHECK-NEXT:    fcmge p10.d, p0/z, z7.d, z25.d
; CHECK-NEXT:    not p4.b, p0/z, p1.b
; CHECK-NEXT:    fcmuo p1.d, p0/z, z26.d, z26.d
; CHECK-NEXT:    movprfx z26, z2
; CHECK-NEXT:    fcvtzs z26.d, p0/m, z2.d
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    movprfx z31, z6
; CHECK-NEXT:    fcvtzs z31.d, p0/m, z6.d
; CHECK-NEXT:    movprfx z8, z7
; CHECK-NEXT:    fcvtzs z8.d, p0/m, z7.d
; CHECK-NEXT:    mov z4.d, p4/m, z0.d
; CHECK-NEXT:    fcmge p4.d, p0/z, z28.d, z25.d
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    mov z29.d, p2/m, z0.d
; CHECK-NEXT:    fcmuo p2.d, p0/z, z27.d, z27.d
; CHECK-NEXT:    movprfx z27, z3
; CHECK-NEXT:    fcvtzs z27.d, p0/m, z3.d
; CHECK-NEXT:    sel z25.d, p5, z0.d, z26.d
; CHECK-NEXT:    movprfx z26, z5
; CHECK-NEXT:    fcvtzs z26.d, p0/m, z5.d
; CHECK-NEXT:    not p6.b, p0/z, p6.b
; CHECK-NEXT:    not p5.b, p0/z, p8.b
; CHECK-NEXT:    fcmgt p8.d, p0/z, z2.d, z1.d
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    mov z27.d, p6/m, z0.d
; CHECK-NEXT:    not p6.b, p0/z, p9.b
; CHECK-NEXT:    fcmuo p9.d, p0/z, z2.d, z2.d
; CHECK-NEXT:    mov z30.d, p4/m, z0.d
; CHECK-NEXT:    not p4.b, p0/z, p10.b
; CHECK-NEXT:    fcmgt p10.d, p0/z, z3.d, z1.d
; CHECK-NEXT:    mov z26.d, p5/m, z0.d
; CHECK-NEXT:    fcmgt p5.d, p0/z, z28.d, z1.d
; CHECK-NEXT:    mov z31.d, p6/m, z0.d
; CHECK-NEXT:    mov z8.d, p4/m, z0.d
; CHECK-NEXT:    sel z0.d, p3, z24.d, z4.d
; CHECK-NEXT:    fcmgt p3.d, p0/z, z5.d, z1.d
; CHECK-NEXT:    fcmgt p4.d, p0/z, z6.d, z1.d
; CHECK-NEXT:    fcmgt p6.d, p0/z, z7.d, z1.d
; CHECK-NEXT:    sel z1.d, p7, z24.d, z29.d
; CHECK-NEXT:    fcmuo p7.d, p0/z, z3.d, z3.d
; CHECK-NEXT:    sel z2.d, p8, z24.d, z25.d
; CHECK-NEXT:    sel z3.d, p10, z24.d, z27.d
; CHECK-NEXT:    sel z4.d, p5, z24.d, z30.d
; CHECK-NEXT:    fcmuo p5.d, p0/z, z28.d, z28.d
; CHECK-NEXT:    fcmuo p8.d, p0/z, z5.d, z5.d
; CHECK-NEXT:    fcmuo p10.d, p0/z, z6.d, z6.d
; CHECK-NEXT:    sel z5.d, p3, z24.d, z26.d
; CHECK-NEXT:    fcmuo p0.d, p0/z, z7.d, z7.d
; CHECK-NEXT:    sel z6.d, p4, z24.d, z31.d
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    sel z7.d, p6, z24.d, z8.d
; CHECK-NEXT:    ldr z8, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z2.d, p9/m, #0 // =0x0
; CHECK-NEXT:    ldr p9, [sp, #2, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z3.d, p7/m, #0 // =0x0
; CHECK-NEXT:    mov z4.d, p5/m, #0 // =0x0
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z5.d, p8/m, #0 // =0x0
; CHECK-NEXT:    mov z6.d, p10/m, #0 // =0x0
; CHECK-NEXT:    ldr p10, [sp, #1, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    mov z1.d, p2/m, #0 // =0x0
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    mov z7.d, p0/m, #0 // =0x0
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #2
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double> %x)
  ret <vscale x 16 x i64> %a
}
declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f64(<vscale x 16 x double>)

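; An nxv32f64 argument no longer fits in the z0-z7 argument registers, so the
; whole value is passed indirectly: its sixteen registers' worth of data are
; loaded from the pointer in x0 (ld1d for the low half, ld1b with rdvl-scaled
; offsets for the high half), and the sixteen result vectors are likewise
; stored through the indirect-result pointer in x8.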
define <vscale x 32 x i64> @llrint_v32i64_v32f64(<vscale x 32 x double> %x) {
; CHECK-LABEL: llrint_v32i64_v32f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-12
; CHECK-NEXT:    str p8, [sp, #3, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p7, [sp, #4, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p6, [sp, #5, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p5, [sp, #6, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str p4, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    str z18, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z17, [sp, #2, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z16, [sp, #3, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z15, [sp, #4, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z14, [sp, #5, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z13, [sp, #6, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z12, [sp, #7, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z11, [sp, #8, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z10, [sp, #9, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z9, [sp, #10, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    str z8, [sp, #11, mul vl] // 16-byte Folded Spill
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xe0, 0x00, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 96 * VG
; CHECK-NEXT:    .cfi_offset w29, -16
; CHECK-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
; CHECK-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
; CHECK-NEXT:    ptrue p1.b
; CHECK-NEXT:    rdvl x9, #8
; CHECK-NEXT:    rdvl x10, #9
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    rdvl x11, #10
; CHECK-NEXT:    mov x12, #-4332462841530417152 // =0xc3e0000000000000
; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x0, x9]
; CHECK-NEXT:    ld1b { z1.b }, p1/z, [x0, x10]
; CHECK-NEXT:    mov z2.d, x12
; CHECK-NEXT:    rdvl x14, #13
; CHECK-NEXT:    rdvl x13, #12
; CHECK-NEXT:    rdvl x12, #11
; CHECK-NEXT:    ld1b { z6.b }, p1/z, [x0, x14]
; CHECK-NEXT:    ld1b { z7.b }, p1/z, [x0, x13]
; CHECK-NEXT:    mov z3.d, #0x8000000000000000
; CHECK-NEXT:    movprfx z24, z0
; CHECK-NEXT:    frintx z24.d, p0/m, z0.d
; CHECK-NEXT:    ld1b { z0.b }, p1/z, [x0, x11]
; CHECK-NEXT:    movprfx z5, z1
; CHECK-NEXT:    frintx z5.d, p0/m, z1.d
; CHECK-NEXT:    ld1b { z1.b }, p1/z, [x0, x12]
; CHECK-NEXT:    mov x15, #4890909195324358655 // =0x43dfffffffffffff
; CHECK-NEXT:    rdvl x16, #15
; CHECK-NEXT:    movprfx z30, z6
; CHECK-NEXT:    frintx z30.d, p0/m, z6.d
; CHECK-NEXT:    movprfx z28, z7
; CHECK-NEXT:    frintx z28.d, p0/m, z7.d
; CHECK-NEXT:    ld1b { z8.b }, p1/z, [x0, x16]
; CHECK-NEXT:    movprfx z4, z0
; CHECK-NEXT:    frintx z4.d, p0/m, z0.d
; CHECK-NEXT:    mov z0.d, #0x7fffffffffffffff
; CHECK-NEXT:    ld1d { z18.d }, p0/z, [x0]
; CHECK-NEXT:    fcmge p3.d, p0/z, z5.d, z2.d
; CHECK-NEXT:    fcmge p2.d, p0/z, z24.d, z2.d
; CHECK-NEXT:    movprfx z6, z5
; CHECK-NEXT:    fcvtzs z6.d, p0/m, z5.d
; CHECK-NEXT:    movprfx z27, z1
; CHECK-NEXT:    frintx z27.d, p0/m, z1.d
; CHECK-NEXT:    movprfx z25, z24
; CHECK-NEXT:    fcvtzs z25.d, p0/m, z24.d
; CHECK-NEXT:    mov z1.d, x15
; CHECK-NEXT:    rdvl x15, #14
; CHECK-NEXT:    movprfx z9, z28
; CHECK-NEXT:    fcvtzs z9.d, p0/m, z28.d
; CHECK-NEXT:    movprfx z13, z8
; CHECK-NEXT:    frintx z13.d, p0/m, z8.d
; CHECK-NEXT:    fcmge p4.d, p0/z, z4.d, z2.d
; CHECK-NEXT:    movprfx z7, z4
; CHECK-NEXT:    fcvtzs z7.d, p0/m, z4.d
; CHECK-NEXT:    ld1d { z15.d }, p0/z, [x0, #2, mul vl]
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    fcmgt p5.d, p0/z, z24.d, z1.d
; CHECK-NEXT:    fcmgt p6.d, p0/z, z5.d, z1.d
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    fcmge p7.d, p0/z, z27.d, z2.d
; CHECK-NEXT:    movprfx z26, z27
; CHECK-NEXT:    fcvtzs z26.d, p0/m, z27.d
; CHECK-NEXT:    sel z29.d, p3, z3.d, z6.d
; CHECK-NEXT:    ld1b { z6.b }, p1/z, [x0, x15]
; CHECK-NEXT:    fcmge p3.d, p0/z, z28.d, z2.d
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    mov z25.d, p2/m, z3.d
; CHECK-NEXT:    fcmgt p2.d, p0/z, z4.d, z1.d
; CHECK-NEXT:    movprfx z16, z13
; CHECK-NEXT:    fcvtzs z16.d, p0/m, z13.d
; CHECK-NEXT:    ld1d { z17.d }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z14.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    sel z31.d, p4, z3.d, z7.d
; CHECK-NEXT:    movprfx z11, z6
; CHECK-NEXT:    frintx z11.d, p0/m, z6.d
; CHECK-NEXT:    not p7.b, p0/z, p7.b
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    sel z6.d, p5, z0.d, z25.d
; CHECK-NEXT:    fcmgt p4.d, p0/z, z27.d, z1.d
; CHECK-NEXT:    sel z7.d, p6, z0.d, z29.d
; CHECK-NEXT:    mov z26.d, p7/m, z3.d
; CHECK-NEXT:    fcmge p5.d, p0/z, z13.d, z2.d
; CHECK-NEXT:    sel z25.d, p2, z0.d, z31.d
; CHECK-NEXT:    fcmge p2.d, p0/z, z30.d, z2.d
; CHECK-NEXT:    sel z29.d, p3, z3.d, z9.d
; CHECK-NEXT:    fcmge p3.d, p0/z, z11.d, z2.d
; CHECK-NEXT:    movprfx z31, z30
; CHECK-NEXT:    fcvtzs z31.d, p0/m, z30.d
; CHECK-NEXT:    movprfx z9, z11
; CHECK-NEXT:    fcvtzs z9.d, p0/m, z11.d
; CHECK-NEXT:    mov z26.d, p4/m, z0.d
; CHECK-NEXT:    fcmgt p4.d, p0/z, z28.d, z1.d
; CHECK-NEXT:    fcmgt p6.d, p0/z, z30.d, z1.d
; CHECK-NEXT:    not p7.b, p0/z, p5.b
; CHECK-NEXT:    fcmuo p5.d, p0/z, z27.d, z27.d
; CHECK-NEXT:    fcmgt p8.d, p0/z, z13.d, z1.d
; CHECK-NEXT:    not p2.b, p0/z, p2.b
; CHECK-NEXT:    movprfx z27, z18
; CHECK-NEXT:    frintx z27.d, p0/m, z18.d
; CHECK-NEXT:    ld1d { z8.d }, p0/z, [x0, #7, mul vl]
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    mov z16.d, p7/m, z3.d
; CHECK-NEXT:    fcmuo p7.d, p0/z, z13.d, z13.d
; CHECK-NEXT:    mov z31.d, p2/m, z3.d
; CHECK-NEXT:    fcmgt p2.d, p0/z, z11.d, z1.d
; CHECK-NEXT:    mov z29.d, p4/m, z0.d
; CHECK-NEXT:    mov z9.d, p3/m, z3.d
; CHECK-NEXT:    fcmuo p3.d, p0/z, z28.d, z28.d
; CHECK-NEXT:    fcmuo p4.d, p0/z, z30.d, z30.d
; CHECK-NEXT:    movprfx z28, z17
; CHECK-NEXT:    frintx z28.d, p0/m, z17.d
; CHECK-NEXT:    movprfx z30, z15
; CHECK-NEXT:    frintx z30.d, p0/m, z15.d
; CHECK-NEXT:    ld1d { z13.d }, p0/z, [x0, #4, mul vl]
; CHECK-NEXT:    mov z31.d, p6/m, z0.d
; CHECK-NEXT:    fcmuo p6.d, p0/z, z11.d, z11.d
; CHECK-NEXT:    sel z11.d, p8, z0.d, z16.d
; CHECK-NEXT:    mov z9.d, p2/m, z0.d
; CHECK-NEXT:    fcmuo p2.d, p0/z, z24.d, z24.d
; CHECK-NEXT:    movprfx z24, z14
; CHECK-NEXT:    frintx z24.d, p0/m, z14.d
; CHECK-NEXT:    fcmge p8.d, p0/z, z27.d, z2.d
; CHECK-NEXT:    ld1d { z10.d }, p0/z, [x0, #6, mul vl]
; CHECK-NEXT:    ld1d { z12.d }, p0/z, [x0, #5, mul vl]
; CHECK-NEXT:    mov z26.d, p5/m, #0 // =0x0
; CHECK-NEXT:    mov z29.d, p3/m, #0 // =0x0
; CHECK-NEXT:    fcmge p5.d, p0/z, z28.d, z2.d
; CHECK-NEXT:    movprfx z14, z27
; CHECK-NEXT:    fcvtzs z14.d, p0/m, z27.d
; CHECK-NEXT:    fcmge p3.d, p0/z, z30.d, z2.d
; CHECK-NEXT:    frintx z13.d, p0/m, z13.d
; CHECK-NEXT:    mov z31.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmge p4.d, p0/z, z24.d, z2.d
; CHECK-NEXT:    mov z9.d, p6/m, #0 // =0x0
; CHECK-NEXT:    movprfx z15, z28
; CHECK-NEXT:    fcvtzs z15.d, p0/m, z28.d
; CHECK-NEXT:    not p6.b, p0/z, p8.b
; CHECK-NEXT:    movprfx z16, z30
; CHECK-NEXT:    fcvtzs z16.d, p0/m, z30.d
; CHECK-NEXT:    frintx z12.d, p0/m, z12.d
; CHECK-NEXT:    frintx z10.d, p0/m, z10.d
; CHECK-NEXT:    movprfx z17, z24
; CHECK-NEXT:    fcvtzs z17.d, p0/m, z24.d
; CHECK-NEXT:    movprfx z18, z8
; CHECK-NEXT:    frintx z18.d, p0/m, z8.d
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    sel z8.d, p6, z3.d, z14.d
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    fcmge p6.d, p0/z, z13.d, z2.d
; CHECK-NEXT:    mov z11.d, p7/m, #0 // =0x0
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    sel z14.d, p5, z3.d, z15.d
; CHECK-NEXT:    fcmuo p7.d, p0/z, z5.d, z5.d
; CHECK-NEXT:    sel z15.d, p3, z3.d, z16.d
; CHECK-NEXT:    movprfx z16, z13
; CHECK-NEXT:    fcvtzs z16.d, p0/m, z13.d
; CHECK-NEXT:    fcmge p5.d, p0/z, z12.d, z2.d
; CHECK-NEXT:    fcmge p3.d, p0/z, z10.d, z2.d
; CHECK-NEXT:    sel z5.d, p4, z3.d, z17.d
; CHECK-NEXT:    fcmge p4.d, p0/z, z18.d, z2.d
; CHECK-NEXT:    not p6.b, p0/z, p6.b
; CHECK-NEXT:    movprfx z2, z12
; CHECK-NEXT:    fcvtzs z2.d, p0/m, z12.d
; CHECK-NEXT:    movprfx z17, z10
; CHECK-NEXT:    fcvtzs z17.d, p0/m, z10.d
; CHECK-NEXT:    st1b { z11.b }, p1, [x8, x16]
; CHECK-NEXT:    movprfx z11, z18
; CHECK-NEXT:    fcvtzs z11.d, p0/m, z18.d
; CHECK-NEXT:    mov z6.d, p2/m, #0 // =0x0
; CHECK-NEXT:    st1b { z9.b }, p1, [x8, x15]
; CHECK-NEXT:    sel z9.d, p6, z3.d, z16.d
; CHECK-NEXT:    fcmuo p6.d, p0/z, z4.d, z4.d
; CHECK-NEXT:    not p5.b, p0/z, p5.b
; CHECK-NEXT:    fcmgt p2.d, p0/z, z18.d, z1.d
; CHECK-NEXT:    mov z7.d, p7/m, #0 // =0x0
; CHECK-NEXT:    not p3.b, p0/z, p3.b
; CHECK-NEXT:    st1b { z31.b }, p1, [x8, x14]
; CHECK-NEXT:    fcmgt p7.d, p0/z, z24.d, z1.d
; CHECK-NEXT:    not p4.b, p0/z, p4.b
; CHECK-NEXT:    mov z2.d, p5/m, z3.d
; CHECK-NEXT:    fcmgt p5.d, p0/z, z28.d, z1.d
; CHECK-NEXT:    sel z4.d, p3, z3.d, z17.d
; CHECK-NEXT:    fcmgt p3.d, p0/z, z13.d, z1.d
; CHECK-NEXT:    mov z25.d, p6/m, #0 // =0x0
; CHECK-NEXT:    sel z3.d, p4, z3.d, z11.d
; CHECK-NEXT:    fcmgt p4.d, p0/z, z10.d, z1.d
; CHECK-NEXT:    fcmgt p6.d, p0/z, z12.d, z1.d
; CHECK-NEXT:    st1b { z29.b }, p1, [x8, x13]
; CHECK-NEXT:    st1b { z26.b }, p1, [x8, x12]
; CHECK-NEXT:    sel z26.d, p5, z0.d, z14.d
; CHECK-NEXT:    fcmgt p5.d, p0/z, z30.d, z1.d
; CHECK-NEXT:    sel z29.d, p3, z0.d, z9.d
; CHECK-NEXT:    fcmuo p3.d, p0/z, z18.d, z18.d
; CHECK-NEXT:    mov z3.d, p2/m, z0.d
; CHECK-NEXT:    st1b { z25.b }, p1, [x8, x11]
; CHECK-NEXT:    fcmuo p2.d, p0/z, z10.d, z10.d
; CHECK-NEXT:    mov z4.d, p4/m, z0.d
; CHECK-NEXT:    fcmuo p4.d, p0/z, z12.d, z12.d
; CHECK-NEXT:    st1b { z7.b }, p1, [x8, x10]
; CHECK-NEXT:    mov z2.d, p6/m, z0.d
; CHECK-NEXT:    st1b { z6.b }, p1, [x8, x9]
; CHECK-NEXT:    fcmuo p1.d, p0/z, z13.d, z13.d
; CHECK-NEXT:    fcmgt p6.d, p0/z, z27.d, z1.d
; CHECK-NEXT:    mov z3.d, p3/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p3.d, p0/z, z24.d, z24.d
; CHECK-NEXT:    sel z1.d, p7, z0.d, z5.d
; CHECK-NEXT:    mov z4.d, p2/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p2.d, p0/z, z30.d, z30.d
; CHECK-NEXT:    sel z5.d, p5, z0.d, z15.d
; CHECK-NEXT:    mov z2.d, p4/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p4.d, p0/z, z28.d, z28.d
; CHECK-NEXT:    mov z29.d, p1/m, #0 // =0x0
; CHECK-NEXT:    fcmuo p1.d, p0/z, z27.d, z27.d
; CHECK-NEXT:    sel z0.d, p6, z0.d, z8.d
; CHECK-NEXT:    mov z1.d, p3/m, #0 // =0x0
; CHECK-NEXT:    st1d { z3.d }, p0, [x8, #7, mul vl]
; CHECK-NEXT:    mov z5.d, p2/m, #0 // =0x0
; CHECK-NEXT:    st1d { z4.d }, p0, [x8, #6, mul vl]
; CHECK-NEXT:    mov z26.d, p4/m, #0 // =0x0
; CHECK-NEXT:    st1d { z2.d }, p0, [x8, #5, mul vl]
; CHECK-NEXT:    mov z0.d, p1/m, #0 // =0x0
; CHECK-NEXT:    st1d { z29.d }, p0, [x8, #4, mul vl]
; CHECK-NEXT:    st1d { z1.d }, p0, [x8, #3, mul vl]
; CHECK-NEXT:    st1d { z5.d }, p0, [x8, #2, mul vl]
; CHECK-NEXT:    st1d { z26.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT:    st1d { z0.d }, p0, [x8]
; CHECK-NEXT:    ldr z18, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z17, [sp, #2, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z16, [sp, #3, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z15, [sp, #4, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z14, [sp, #5, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z13, [sp, #6, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z12, [sp, #7, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z11, [sp, #8, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z10, [sp, #9, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z9, [sp, #10, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr z8, [sp, #11, mul vl] // 16-byte Folded Reload
; CHECK-NEXT:    ldr p8, [sp, #3, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p7, [sp, #4, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p6, [sp, #5, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p5, [sp, #6, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    ldr p4, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #12
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %a = call <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f64(<vscale x 32 x double> %x)
  ret <vscale x 32 x i64> %a
}
declare <vscale x 32 x i64> @llvm.llrint.nxv32i64.nxv32f64(<vscale x 32 x double>)