; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s
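; Cost estimates are checked for the llvm.smax, llvm.smin, llvm.umax and
; llvm.umin intrinsics over scalar i8/i16/i32/i64 values, fixed-length vectors
; of 2 to 16 elements, and scalable vectors from <vscale x 1 x ...> up to
; <vscale x 16 x ...>. With +v, the reported vector cost appears to double each
; time the type doubles beyond what fits in a single vector register.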

define void @smax() {
; CHECK-LABEL: 'smax'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.smax.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.smax.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.smax.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.smax.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.smax.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.smax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.smax.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.smax.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.smax.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.smax.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.smax.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.smax.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.smax.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.smax.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.smax.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.smax.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.smax.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.smax.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.smax.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.smax.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.smax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.smax.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.smax.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.smax.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.smax.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.smax.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.smax.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.smax.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.smax.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.smax.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.smax.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.smax.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.smax.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.smax.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.smax.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.smax.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @smin() {
; CHECK-LABEL: 'smin'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.smin.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.smin.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.smin.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.smin.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.smin.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.smin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.smin.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.smin.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.smin.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.smin.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.smin.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.smin.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.smin.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.smin.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.smin.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.smin.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.smin.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.smin.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.smin.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.smin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.smin.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.smin.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.smin.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.smin.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.smin.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.smin.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.smin.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.smin.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.smin.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.smin.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.smin.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.smin.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.smin.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.smin.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.smin.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @umax() {
; CHECK-LABEL: 'umax'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.umax.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.umax.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.umax.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.umax.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.umax.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.umax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.umax.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.umax.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.umax.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.umax.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.umax.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.umax.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.umax.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.umax.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.umax.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.umax.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.umax.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.umax.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.umax.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.umax.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.umax.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.umax.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.umax.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.umax.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.umax.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.umax.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.umax.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.umax.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.umax.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.umax.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.umax.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.umax.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.umax.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.umax.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.umax.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.umax.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

define void @umin() {
; CHECK-LABEL: 'umin'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %1 = call i8 @llvm.umin.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.umin.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.umin.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.umin.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.umin.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i8> @llvm.umin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %11 = call i16 @llvm.umin.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i16> @llvm.umin.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i16> @llvm.umin.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i16> @llvm.umin.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %20 = call <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %21 = call i32 @llvm.umin.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %22 = call <2 x i32> @llvm.umin.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %23 = call <4 x i32> @llvm.umin.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %24 = call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %25 = call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %26 = call <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %27 = call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %29 = call <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %30 = call <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %31 = call i64 @llvm.umin.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %32 = call <2 x i64> @llvm.umin.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %36 = call <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %37 = call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %38 = call <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %39 = call <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.umin.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.umin.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.umin.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.umin.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.umin.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 1 x i8> @llvm.umin.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef)
  call <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.umin.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.umin.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.umin.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.umin.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.umin.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef)
  call <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.umin.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.umin.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.umin.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.umin.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.umin.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef)
  call <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.umin.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.umin.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.umin.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.umin.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.umin.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef)
  call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

declare i8 @llvm.smax.i8(i8, i8)
declare <2 x i8> @llvm.smax.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.smax.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.smax.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.smax.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.smax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.smax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.smax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.smax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.smax.i16(i16, i16)
declare <2 x i16> @llvm.smax.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.smax.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.smax.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.smax.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.smax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.smax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.smax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.smax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.smax.i32(i32, i32)
declare <2 x i32> @llvm.smax.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.smax.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.smax.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.smax.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.smax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.smax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.smax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.smax.i64(i64, i64)
declare <2 x i64> @llvm.smax.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.smax.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.smax.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.smax.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.smax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.smax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.smax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.smin.i8(i8, i8)
declare <2 x i8> @llvm.smin.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.smin.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.smin.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.smin.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.smin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.smin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.smin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.smin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.smin.i16(i16, i16)
declare <2 x i16> @llvm.smin.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.smin.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.smin.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.smin.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.smin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.smin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.smin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.smin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.smin.i32(i32, i32)
declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.smin.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.smin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.smin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.smin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.smin.i64(i64, i64)
declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.smin.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.smin.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.smin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.smin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.smin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.umax.i8(i8, i8)
declare <2 x i8> @llvm.umax.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.umax.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.umax.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.umax.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.umax.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.umax.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.umax.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.umax.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.umax.i16(i16, i16)
declare <2 x i16> @llvm.umax.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.umax.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.umax.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.umax.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.umax.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.umax.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.umax.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.umax.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.umax.i32(i32, i32)
declare <2 x i32> @llvm.umax.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.umax.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.umax.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.umax.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.umax.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.umax.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.umax.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.umax.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.umax.i64(i64, i64)
declare <2 x i64> @llvm.umax.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.umax.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.umax.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.umax.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.umax.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.umax.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.umax.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.umin.i8(i8, i8)
declare <2 x i8> @llvm.umin.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.umin.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.umin.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.umin.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 1 x i8> @llvm.umin.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>)
declare <vscale x 2 x i8> @llvm.umin.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.umin.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.umin.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.umin.i16(i16, i16)
declare <2 x i16> @llvm.umin.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.umin.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.umin.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.umin.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 1 x i16> @llvm.umin.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>)
declare <vscale x 2 x i16> @llvm.umin.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.umin.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.umin.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.umin.i32(i32, i32)
declare <2 x i32> @llvm.umin.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.umin.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.umin.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.umin.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 1 x i32> @llvm.umin.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>)
declare <vscale x 2 x i32> @llvm.umin.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.umin.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.umin.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.umin.i64(i64, i64)
declare <2 x i64> @llvm.umin.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.umin.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.umin.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.umin.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 1 x i64> @llvm.umin.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>)
declare <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.umin.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.umin.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)