clang/test/CodeGen/aarch64-sve-vls-arith-ops.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -disable-O0-optnone -mvscale-min=4 -mvscale-max=4 \
// RUN: -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s

// REQUIRES: aarch64-registered-target
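
// -disable-O0-optnone allows opt to process the -O0 output, and the SROA
// pass promotes the frontend's allocas so the CHECK lines see only the
// scalable<->fixed coercions and the arithmetic itself.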

#include <arm_sve.h>

#define N 512

typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
typedef svint16_t fixed_int16_t __attribute__((arm_sve_vector_bits(N)));
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));

typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint16_t fixed_uint16_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint32_t fixed_uint32_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint64_t fixed_uint64_t __attribute__((arm_sve_vector_bits(N)));

typedef svfloat16_t fixed_float16_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));

typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
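
// With -mvscale-min=4 -mvscale-max=4 the SVE vector length is fixed at
// N = 512 bits, so e.g. fixed_int8_t is a <64 x i8> value. At function
// boundaries these types are passed and returned as scalable vectors, hence
// the llvm.vector.extract/llvm.vector.insert pairs in every function below.
// As an illustration (not part of the autogenerated checks), the size
// mapping can be verified at compile time:
_Static_assert(sizeof(fixed_int8_t) == N / 8, "VLS types occupy N bits");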

// ADDITION

// CHECK-LABEL: @add_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t add_i8(fixed_int8_t a, fixed_int8_t b) {
  return a + b;
}

// CHECK-LABEL: @add_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t add_i16(fixed_int16_t a, fixed_int16_t b) {
  return a + b;
}

// CHECK-LABEL: @add_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t add_i32(fixed_int32_t a, fixed_int32_t b) {
  return a + b;
}

// CHECK-LABEL: @add_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t add_i64(fixed_int64_t a, fixed_int64_t b) {
  return a + b;
}

// CHECK-LABEL: @add_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t add_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a + b;
}

// CHECK-LABEL: @add_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t add_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a + b;
}

// CHECK-LABEL: @add_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t add_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a + b;
}

// CHECK-LABEL: @add_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t add_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a + b;
}
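
// For the half-precision vectors below, both operands are extended to
// <32 x float> and the result truncated back to <32 x half>, reflecting the
// promote-to-float arithmetic semantics of the __fp16 element type.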

// CHECK-LABEL: @add_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[ADD:%.*]] = fadd <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[ADD]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t add_f16(fixed_float16_t a, fixed_float16_t b) {
  return a + b;
}

// CHECK-LABEL: @add_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t add_f32(fixed_float32_t a, fixed_float32_t b) {
  return a + b;
}

// CHECK-LABEL: @add_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t add_f64(fixed_float64_t a, fixed_float64_t b) {
  return a + b;
}
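
// The *_inplace variants use compound assignment. After SROA they lower to
// the same IR as the plain operators; only the f16 case differs, where the
// right-hand side is extended before the left-hand side.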

// CHECK-LABEL: @add_inplace_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t add_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t add_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t add_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t add_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t add_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t add_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t add_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t add_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[ADD:%.*]] = fadd <32 x float> [[CONV2]], [[CONV]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[ADD]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t add_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = fadd <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t add_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
  return a += b;
}

// CHECK-LABEL: @add_inplace_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[ADD:%.*]] = fadd <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t add_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
  return a += b;
}
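
// The *_scalar variants splat the scalar operand across the fixed-length
// vector (insertelement into poison plus a zeroinitializer shufflevector
// mask) before performing the element-wise operation.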

// CHECK-LABEL: @add_scalar_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t add_scalar_i8(fixed_int8_t a, int8_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t add_scalar_i16(fixed_int16_t a, int16_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t add_scalar_i32(fixed_int32_t a, int32_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t add_scalar_i64(fixed_int64_t a, int64_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t add_scalar_u8(fixed_uint8_t a, uint8_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t add_scalar_u16(fixed_uint16_t a, uint16_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t add_scalar_u32(fixed_uint32_t a, uint32_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t add_scalar_u64(fixed_uint64_t a, uint64_t b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = fadd <32 x half> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t add_scalar_f16(fixed_float16_t a, __fp16 b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = fadd <16 x float> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t add_scalar_f32(fixed_float32_t a, float b) {
  return a + b;
}

// CHECK-LABEL: @add_scalar_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[ADD:%.*]] = fadd <8 x double> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[ADD]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t add_scalar_f64(fixed_float64_t a, double b) {
  return a + b;
}

// SUBTRACTION

// CHECK-LABEL: @sub_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t sub_i8(fixed_int8_t a, fixed_int8_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t sub_i16(fixed_int16_t a, fixed_int16_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t sub_i32(fixed_int32_t a, fixed_int32_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t sub_i64(fixed_int64_t a, fixed_int64_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t sub_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t sub_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t sub_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t sub_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[SUB:%.*]] = fsub <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[SUB]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t sub_f16(fixed_float16_t a, fixed_float16_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t sub_f32(fixed_float32_t a, fixed_float32_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t sub_f64(fixed_float64_t a, fixed_float64_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_inplace_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t sub_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t sub_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t sub_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t sub_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t sub_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t sub_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t sub_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t sub_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[SUB:%.*]] = fsub <32 x float> [[CONV2]], [[CONV]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[SUB]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t sub_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = fsub <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t sub_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_inplace_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SUB:%.*]] = fsub <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t sub_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
  return a -= b;
}

// CHECK-LABEL: @sub_scalar_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t sub_scalar_i8(fixed_int8_t a, int8_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t sub_scalar_i16(fixed_int16_t a, int16_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t sub_scalar_i32(fixed_int32_t a, int32_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t sub_scalar_i64(fixed_int64_t a, int64_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t sub_scalar_u8(fixed_uint8_t a, uint8_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t sub_scalar_u16(fixed_uint16_t a, uint16_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t sub_scalar_u32(fixed_uint32_t a, uint32_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = sub <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t sub_scalar_u64(fixed_uint64_t a, uint64_t b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = fsub <32 x half> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t sub_scalar_f16(fixed_float16_t a, __fp16 b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = fsub <16 x float> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t sub_scalar_f32(fixed_float32_t a, float b) {
  return a - b;
}

// CHECK-LABEL: @sub_scalar_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SUB:%.*]] = fsub <8 x double> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[SUB]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t sub_scalar_f64(fixed_float64_t a, double b) {
  return a - b;
}

// MULTIPLICATION

// CHECK-LABEL: @mul_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t mul_i8(fixed_int8_t a, fixed_int8_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t mul_i16(fixed_int16_t a, fixed_int16_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t mul_i32(fixed_int32_t a, fixed_int32_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t mul_i64(fixed_int64_t a, fixed_int64_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t mul_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t mul_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t mul_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t mul_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[MUL]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t mul_f16(fixed_float16_t a, fixed_float16_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t mul_f32(fixed_float32_t a, fixed_float32_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t mul_f64(fixed_float64_t a, fixed_float64_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_inplace_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t mul_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t mul_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t mul_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t mul_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t mul_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t mul_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t mul_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t mul_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[MUL:%.*]] = fmul <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[MUL]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t mul_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = fmul <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t mul_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
  return a *= b;
}

// CHECK-LABEL: @mul_inplace_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[MUL:%.*]] = fmul <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t mul_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
  return a *= b;
}
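
// In the vector-by-scalar forms, the scalar operand is broadcast with an
// insertelement/shufflevector splat before the arithmetic op.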

// CHECK-LABEL: @mul_scalar_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t mul_scalar_i8(fixed_int8_t a, int8_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t mul_scalar_i16(fixed_int16_t a, int16_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t mul_scalar_i32(fixed_int32_t a, int32_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t mul_scalar_i64(fixed_int64_t a, int64_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t mul_scalar_u8(fixed_uint8_t a, uint8_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t mul_scalar_u16(fixed_uint16_t a, uint16_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t mul_scalar_u32(fixed_uint32_t a, uint32_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = mul <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t mul_scalar_u64(fixed_uint64_t a, uint64_t b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = fmul <32 x half> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t mul_scalar_f16(fixed_float16_t a, __fp16 b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = fmul <16 x float> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t mul_scalar_f32(fixed_float32_t a, float b) {
  return a * b;
}

// CHECK-LABEL: @mul_scalar_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[MUL:%.*]] = fmul <8 x double> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[MUL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t mul_scalar_f64(fixed_float64_t a, double b) {
  return a * b;
}

// DIVISION
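// Signed integer division lowers to sdiv, unsigned to udiv, and
// floating-point division to fdiv.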

// CHECK-LABEL: @div_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t div_i8(fixed_int8_t a, fixed_int8_t b) {
  return a / b;
}

// CHECK-LABEL: @div_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t div_i16(fixed_int16_t a, fixed_int16_t b) {
  return a / b;
}

// CHECK-LABEL: @div_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t div_i32(fixed_int32_t a, fixed_int32_t b) {
  return a / b;
}

// CHECK-LABEL: @div_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t div_i64(fixed_int64_t a, fixed_int64_t b) {
  return a / b;
}

// CHECK-LABEL: @div_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t div_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a / b;
}

// CHECK-LABEL: @div_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t div_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a / b;
}

// CHECK-LABEL: @div_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t div_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a / b;
}

// CHECK-LABEL: @div_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t div_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a / b;
}

// CHECK-LABEL: @div_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[DIV]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t div_f16(fixed_float16_t a, fixed_float16_t b) {
  return a / b;
}

// CHECK-LABEL: @div_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t div_f32(fixed_float32_t a, fixed_float32_t b) {
  return a / b;
}

// CHECK-LABEL: @div_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t div_f64(fixed_float64_t a, fixed_float64_t b) {
  return a / b;
}

// CHECK-LABEL: @div_inplace_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t div_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t div_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t div_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t div_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t div_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t div_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t div_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = udiv <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t div_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = fpext <32 x half> [[A]] to <32 x float>
// CHECK-NEXT:    [[CONV2:%.*]] = fpext <32 x half> [[B]] to <32 x float>
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <32 x float> [[CONV]], [[CONV2]]
// CHECK-NEXT:    [[CONV3:%.*]] = fptrunc <32 x float> [[DIV]] to <32 x half>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[CONV3]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t div_inplace_f16(fixed_float16_t a, fixed_float16_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <16 x float> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t div_inplace_f32(fixed_float32_t a, fixed_float32_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_inplace_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <8 x double> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t div_inplace_f64(fixed_float64_t a, fixed_float64_t b) {
  return a /= b;
}

// CHECK-LABEL: @div_scalar_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t div_scalar_i8(fixed_int8_t a, int8_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t div_scalar_i16(fixed_int16_t a, int16_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t div_scalar_i32(fixed_int32_t a, int32_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = sdiv <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t div_scalar_i64(fixed_int64_t a, int64_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = udiv <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t div_scalar_u8(fixed_uint8_t a, uint8_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = udiv <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t div_scalar_u16(fixed_uint16_t a, uint16_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = udiv <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t div_scalar_u32(fixed_uint32_t a, uint32_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = udiv <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t div_scalar_u64(fixed_uint64_t a, uint64_t b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x half> @llvm.vector.extract.v32f16.nxv8f16(<vscale x 8 x half> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x half> poison, half [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x half> [[SPLAT_SPLATINSERT]], <32 x half> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <32 x half> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v32f16(<vscale x 8 x half> undef, <32 x half> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x half> [[CASTSCALABLESVE]]
//
fixed_float16_t div_scalar_f16(fixed_float16_t a, __fp16 b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x float> @llvm.vector.extract.v16f32.nxv4f32(<vscale x 4 x float> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x float> poison, float [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x float> [[SPLAT_SPLATINSERT]], <16 x float> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <16 x float> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v16f32(<vscale x 4 x float> undef, <16 x float> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x float> [[CASTSCALABLESVE]]
//
fixed_float32_t div_scalar_f32(fixed_float32_t a, float b) {
  return a / b;
}

// CHECK-LABEL: @div_scalar_f64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x double> @llvm.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x double> poison, double [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x double> [[SPLAT_SPLATINSERT]], <8 x double> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[DIV:%.*]] = fdiv <8 x double> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[DIV]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
//
fixed_float64_t div_scalar_f64(fixed_float64_t a, double b) {
  return a / b;
}

// REMAINDER
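// '%' is only defined for integer operands, so there are no floating-point
// variants; signed remainder lowers to srem, unsigned to urem.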

// CHECK-LABEL: @rem_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t rem_i8(fixed_int8_t a, fixed_int8_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t rem_i16(fixed_int16_t a, fixed_int16_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t rem_i32(fixed_int32_t a, fixed_int32_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t rem_i64(fixed_int64_t a, fixed_int64_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t rem_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t rem_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t rem_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t rem_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_inplace_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t rem_inplace_i8(fixed_int8_t a, fixed_int8_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_inplace_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t rem_inplace_i16(fixed_int16_t a, fixed_int16_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_inplace_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t rem_inplace_i32(fixed_int32_t a, fixed_int32_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_inplace_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = srem <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t rem_inplace_i64(fixed_int64_t a, fixed_int64_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_inplace_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t rem_inplace_u8(fixed_uint8_t a, fixed_uint8_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_inplace_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t rem_inplace_u16(fixed_uint16_t a, fixed_uint16_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_inplace_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t rem_inplace_u32(fixed_uint32_t a, fixed_uint32_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_inplace_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[REM:%.*]] = urem <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t rem_inplace_u64(fixed_uint64_t a, fixed_uint64_t b) {
  return a %= b;
}

// CHECK-LABEL: @rem_scalar_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = srem <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t rem_scalar_i8(fixed_int8_t a, int8_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_scalar_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = srem <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t rem_scalar_i16(fixed_int16_t a, int16_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_scalar_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = srem <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t rem_scalar_i32(fixed_int32_t a, int32_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_scalar_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = srem <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t rem_scalar_i64(fixed_int64_t a, int64_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_scalar_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <64 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <64 x i8> [[SPLAT_SPLATINSERT]], <64 x i8> poison, <64 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = urem <64 x i8> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t rem_scalar_u8(fixed_uint8_t a, uint8_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_scalar_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i16> [[SPLAT_SPLATINSERT]], <32 x i16> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = urem <32 x i16> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t rem_scalar_u16(fixed_uint16_t a, uint16_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_scalar_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = urem <16 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t rem_scalar_u32(fixed_uint32_t a, uint32_t b) {
  return a % b;
}

// CHECK-LABEL: @rem_scalar_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i64> [[SPLAT_SPLATINSERT]], <8 x i64> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[REM:%.*]] = urem <8 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[REM]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t rem_scalar_u64(fixed_uint64_t a, uint64_t b) {
  return a % b;
}