# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs -global-isel=1 -run-pass=legalizer -o - %s | FileCheck -check-prefix=GFX10 %s
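# Check how the legalizer uses known zero bits of the operands to simplify the expansion of 64-bit G_MUL.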
---
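# Nothing is known about either operand: the full expansion is used, mad_u64_u32 for the low partial product plus two 32-bit multiplies for the cross terms.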
name: v_mul_i64_no_zext
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-LABEL: name: v_mul_i64_no_zext
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_MUL %0, %1
$vgpr0_vgpr1 = COPY %2
...
---
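# The high half of the second operand is known zero (zero-extend), so the lo0 * hi1 cross term is dropped.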
name: v_mul_i64_zext_src1
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2
; GFX10-LABEL: name: v_mul_i64_zext_src1
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[COPY1]], [[C]]
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[COPY1]]
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV3]], [[MUL]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[ADD]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
%2:_(s64) = G_ZEXT %1(s32)
%3:_(s64) = G_MUL %0, %2
$vgpr0_vgpr1 = COPY %3
...
---
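# The high half of the first operand is known zero (zero-extend), so the hi0 * lo1 cross term is dropped.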
name: v_mul_i64_zext_src0
body: |
bb.0:
liveins: $vgpr0, $vgpr2_vgpr3
; GFX10-LABEL: name: v_mul_i64_zext_src0
; GFX10: liveins: $vgpr0, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[COPY]](s32), [[UV]], [[C]]
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[UV1]]
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV3]], [[MUL]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV2]](s32), [[ADD]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s32) = COPY $vgpr0
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_ZEXT %0(s32)
%3:_(s64) = G_MUL %2, %1
$vgpr0_vgpr1 = COPY %3
...
---
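# Both operands are zero-extended from 32 bits, so no cross terms are needed and a single 32x32->64 product covers the whole result.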
name: v_mul_i64_zext_src0_src1
body: |
bb.0:
liveins: $vgpr0, $vgpr1
; GFX10-LABEL: name: v_mul_i64_zext_src0_src1
; GFX10: liveins: $vgpr0, $vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[COPY]](s32), [[COPY1]], [[C]]
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[AMDGPU_MAD_U64_U32_]](s64)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s64) = G_ZEXT %0(s32)
%3:_(s64) = G_ZEXT %1(s32)
%4:_(s64) = G_MUL %2, %3
$vgpr0_vgpr1 = COPY %4
...
---
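# The high half of the first operand is masked to zero, so the hi0 * lo1 cross term is dropped.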
name: v_mul_i64_masked_src0_hi
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-LABEL: name: v_mul_i64_masked_src0_hi
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; GFX10-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C1]]
; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_CONSTANT i64 4294967295
%3:_(s64) = G_AND %0, %2
%4:_(s64) = G_MUL %3, %1
$vgpr0_vgpr1 = COPY %4
...
---
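# The low half of the first operand is masked to zero: the low half of the result is zero and only hi0 * lo1 contributes to the high half.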
name: v_mul_i64_masked_src0_lo
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-LABEL: name: v_mul_i64_masked_src0_lo
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
; GFX10-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[MUL]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_CONSTANT i64 -4294967296
%3:_(s64) = G_AND %0, %2
%4:_(s64) = G_MUL %3, %1
$vgpr0_vgpr1 = COPY %4
...
---
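# The low half of the second operand is masked to zero: the low half of the result is zero and only lo0 * hi1 contributes to the high half.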
name: v_mul_i64_masked_src1_lo
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-LABEL: name: v_mul_i64_masked_src1_lo
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
; GFX10-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY1]], [[C]]
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[MUL]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_CONSTANT i64 -4294967296
%3:_(s64) = G_AND %1, %2
%4:_(s64) = G_MUL %0, %3
$vgpr0_vgpr1 = COPY %4
...
---
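# The first operand is masked to zero, so the whole product folds to the constant zero.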
name: v_mul_i64_masked_src0
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-LABEL: name: v_mul_i64_masked_src0
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[C]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_CONSTANT i64 0
%3:_(s64) = G_AND %0, %2
%4:_(s64) = G_MUL %3, %1
$vgpr0_vgpr1 = COPY %4
...
---
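# The mask clears only some bits in each half of the first operand, so neither half is known zero and the full expansion is kept.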
name: v_mul_i64_partially_masked_src0
body: |
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-LABEL: name: v_mul_i64_partially_masked_src0
; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 263951509094400
; GFX10-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C1]]
; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_CONSTANT i64 263951509094400
%3:_(s64) = G_AND %0, %2
%4:_(s64) = G_MUL %3, %1
$vgpr0_vgpr1 = COPY %4
...
---
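# The second operand is a constant whose low half is zero: the low half of the result is zero and only lo0 * hi1 contributes to the high half.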
name: v_mul_i64_constant_hi
body: |
bb.0:
liveins: $vgpr0_vgpr1
; GFX10-LABEL: name: v_mul_i64_constant_hi
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[MUL]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = G_CONSTANT i64 -4294967296
%2:_(s64) = G_MUL %0, %1
$vgpr0_vgpr1 = COPY %2
...
---
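# The second operand is a constant that fits in 32 bits, so the lo0 * hi1 cross term is dropped.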
name: v_mul_i64_constant_lo
body: |
bb.0:
liveins: $vgpr0_vgpr1
; GFX10-LABEL: name: v_mul_i64_constant_lo
; GFX10: liveins: $vgpr0_vgpr1
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4294967295
; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C1]]
; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD]](s32)
; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = G_CONSTANT i64 4294967295
%2:_(s64) = G_MUL %0, %1
$vgpr0_vgpr1 = COPY %2
...