# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SSE
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -mattr=+avx512vl -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=AVX512VL
--- |
; IR context for the MIR bodies below: it supplies the %ir.* value names and
; atomic memory operands referenced by the machine memory operands. All atomic
; accesses use 'unordered' ordering, which on x86 requires no fences and
; selects to plain MOV instructions when naturally aligned.
define i8 @test_load_i8(ptr %p1) {
%r = load atomic i8, ptr %p1 unordered, align 1
ret i8 %r
}
define i16 @test_load_i16(ptr %p1) {
%r = load atomic i16, ptr %p1 unordered, align 2
ret i16 %r
}
define i32 @test_load_i32(ptr %p1) {
%r = load atomic i32, ptr %p1 unordered, align 4
ret i32 %r
}
define i64 @test_load_i64(ptr %p1) {
%r = load atomic i64, ptr %p1 unordered, align 8
ret i64 %r
}
define float @test_load_float(ptr %p1) {
%r = load atomic float, ptr %p1 unordered, align 4
ret float %r
}
define float @test_load_float_vecreg(ptr %p1) {
%r = load atomic float, ptr %p1 unordered, align 8
ret float %r
}
define double @test_load_double(ptr %p1) {
%r = load atomic double, ptr %p1 unordered, align 8
ret double %r
}
define double @test_load_double_vecreg(ptr %p1) {
%r = load atomic double, ptr %p1 unordered, align 8
ret double %r
}
define ptr @test_store_i32(i32 %val, ptr %p1) {
store atomic i32 %val, ptr %p1 unordered, align 4
ret ptr %p1
}
define ptr @test_store_i64(i64 %val, ptr %p1) {
store atomic i64 %val, ptr %p1 unordered, align 8
ret ptr %p1
}
define ptr @test_store_float(float %val, ptr %p1) {
store atomic float %val, ptr %p1 unordered, align 4
ret ptr %p1
}
define ptr @test_store_float_vec(float %val, ptr %p1) {
store atomic float %val, ptr %p1 unordered, align 4
ret ptr %p1
}
define ptr @test_store_double(double %val, ptr %p1) {
store atomic double %val, ptr %p1 unordered, align 8
ret ptr %p1
}
define ptr @test_store_double_vec(double %val, ptr %p1) {
store atomic double %val, ptr %p1 unordered, align 8
ret ptr %p1
}
define ptr @test_load_ptr(ptr %ptr1) {
%p = load atomic ptr, ptr %ptr1 unordered, align 8
ret ptr %p
}
define void @test_store_ptr(ptr %ptr1, ptr %a) {
store atomic ptr %a, ptr %ptr1 unordered, align 8
ret void
}
define i32 @test_gep_folding(ptr %arr, i32 %val) {
%arrayidx = getelementptr i32, ptr %arr, i32 5
store atomic i32 %val, ptr %arrayidx unordered, align 8
%r = load atomic i32, ptr %arrayidx unordered, align 8
ret i32 %r
}
define i32 @test_gep_folding_largeGepIndex(ptr %arr, i32 %val) #0 {
%arrayidx = getelementptr i32, ptr %arr, i64 57179869180
store atomic i32 %val, ptr %arrayidx unordered, align 8
%r = load atomic i32, ptr %arrayidx unordered, align 8
ret i32 %r
}
...
---
# Unordered atomic s8 G_LOAD selects to a plain MOV8rm on every subtarget;
# the 'unordered' flag is carried on the machine memory operand.
name: test_load_i8
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_i8
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
; SSE: $al = COPY [[MOV8rm]]
; SSE: RET 0, implicit $al
; AVX-LABEL: name: test_load_i8
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
; AVX: $al = COPY [[MOV8rm]]
; AVX: RET 0, implicit $al
; AVX512F-LABEL: name: test_load_i8
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
; AVX512F: $al = COPY [[MOV8rm]]
; AVX512F: RET 0, implicit $al
; AVX512VL-LABEL: name: test_load_i8
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s8) from %ir.p1)
; AVX512VL: $al = COPY [[MOV8rm]]
; AVX512VL: RET 0, implicit $al
%0(p0) = COPY $rdi
%1(s8) = G_LOAD %0(p0) :: (load unordered (s8) from %ir.p1)
$al = COPY %1(s8)
RET 0, implicit $al
...
---
# Unordered atomic s16 G_LOAD selects to MOV16rm on every subtarget.
name: test_load_i16
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_i16
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
; SSE: $ax = COPY [[MOV16rm]]
; SSE: RET 0, implicit $ax
; AVX-LABEL: name: test_load_i16
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
; AVX: $ax = COPY [[MOV16rm]]
; AVX: RET 0, implicit $ax
; AVX512F-LABEL: name: test_load_i16
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
; AVX512F: $ax = COPY [[MOV16rm]]
; AVX512F: RET 0, implicit $ax
; AVX512VL-LABEL: name: test_load_i16
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s16) from %ir.p1)
; AVX512VL: $ax = COPY [[MOV16rm]]
; AVX512VL: RET 0, implicit $ax
%0(p0) = COPY $rdi
%1(s16) = G_LOAD %0(p0) :: (load unordered (s16) from %ir.p1)
$ax = COPY %1(s16)
RET 0, implicit $ax
...
---
# Unordered atomic s32 G_LOAD selects to MOV32rm on every subtarget.
name: test_load_i32
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_i32
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; SSE: $eax = COPY [[MOV32rm]]
; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_load_i32
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX: $eax = COPY [[MOV32rm]]
; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_load_i32
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX512F: $eax = COPY [[MOV32rm]]
; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_load_i32
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX512VL: $eax = COPY [[MOV32rm]]
; AVX512VL: RET 0, implicit $eax
%0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load unordered (s32) from %ir.p1)
$eax = COPY %1(s32)
RET 0, implicit $eax
...
---
# Unordered atomic s64 G_LOAD selects to MOV64rm on every subtarget.
name: test_load_i64
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_i64
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; SSE: $rax = COPY [[MOV64rm]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_load_i64
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX: $rax = COPY [[MOV64rm]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_load_i64
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX512F: $rax = COPY [[MOV64rm]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_load_i64
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX512VL: $rax = COPY [[MOV64rm]]
; AVX512VL: RET 0, implicit $rax
%0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load unordered (s64) from %ir.p1)
$rax = COPY %1(s64)
RET 0, implicit $rax
...
---
# Float return: the unordered atomic load is done on the gpr bank (MOV32rm),
# then copied across to the vecr bank (fr32/fr32x -> vr128/vr128x) to return
# in $xmm0. AVX512 prefixes check the extended 'x' register classes.
name: test_load_float
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_float
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
%0:gpr(p0) = COPY $rdi
%1:gpr(s32) = G_LOAD %0(p0) :: (load unordered (s32) from %ir.p1)
%3:vecr(s32) = COPY %1(s32)
%2:vecr(s128) = G_ANYEXT %3(s32)
$xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
---
# Same as test_load_float but the IR load is 8-byte aligned; selection is
# identical: gpr-bank MOV32rm followed by copies into the vector bank.
name: test_load_float_vecreg
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_float_vecreg
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float_vecreg
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[MOV32rm]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float_vecreg
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float_vecreg
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.p1)
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[MOV32rm]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
%0:gpr(p0) = COPY $rdi
%1:gpr(s32) = G_LOAD %0(p0) :: (load unordered (s32) from %ir.p1)
%3:vecr(s32) = COPY %1(s32)
%2:vecr(s128) = G_ANYEXT %3(s32)
$xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
---
# Double return: unordered atomic load done as gpr MOV64rm, then copied into
# the vector bank (fr64/fr64x -> vr128/vr128x) to return in $xmm0.
name: test_load_double
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_double
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
%0:gpr(p0) = COPY $rdi
%1:gpr(s64) = G_LOAD %0(p0) :: (load unordered (s64) from %ir.p1)
%3:vecr(s64) = COPY %1(s64)
%2:vecr(s128) = G_ANYEXT %3(s64)
$xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
---
# Same as test_load_double; kept as a separate case mirroring the float
# *_vecreg variant. Selection is identical across all prefixes.
name: test_load_double_vecreg
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: vecr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_double_vecreg
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
; SSE: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; SSE: $xmm0 = COPY [[COPY2]]
; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double_vecreg
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[MOV64rm]]
; AVX: [[COPY2:%[0-9]+]]:vr128 = COPY [[COPY1]]
; AVX: $xmm0 = COPY [[COPY2]]
; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double_vecreg
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
; AVX512F: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512F: $xmm0 = COPY [[COPY2]]
; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double_vecreg
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (s64) from %ir.p1)
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[MOV64rm]]
; AVX512VL: [[COPY2:%[0-9]+]]:vr128x = COPY [[COPY1]]
; AVX512VL: $xmm0 = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $xmm0
%0:gpr(p0) = COPY $rdi
%1:gpr(s64) = G_LOAD %0(p0) :: (load unordered (s64) from %ir.p1)
%3:vecr(s64) = COPY %1(s64)
%2:vecr(s128) = G_ANYEXT %3(s64)
$xmm0 = COPY %2(s128)
RET 0, implicit $xmm0
...
---
# Unordered atomic s32 G_STORE selects to MOV32mr; the pointer is also
# returned in $rax to verify it survives selection.
name: test_store_i32
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $rsi
; SSE-LABEL: name: test_store_i32
; SSE: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
; SSE: $rax = COPY [[COPY1]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_i32
; AVX: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
; AVX: $rax = COPY [[COPY1]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_i32
; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
; AVX512F: $rax = COPY [[COPY1]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_i32
; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s32) into %ir.p1)
; AVX512VL: $rax = COPY [[COPY1]]
; AVX512VL: RET 0, implicit $rax
%0(s32) = COPY $edi
%1(p0) = COPY $rsi
G_STORE %0(s32), %1(p0) :: (store unordered (s32) into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
...
---
# Unordered atomic s64 G_STORE selects to MOV64mr on every subtarget.
name: test_store_i64
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $rsi
; SSE-LABEL: name: test_store_i64
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
; SSE: $rax = COPY [[COPY1]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_i64
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
; AVX: $rax = COPY [[COPY1]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_i64
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
; AVX512F: $rax = COPY [[COPY1]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_i64
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store unordered (s64) into %ir.p1)
; AVX512VL: $rax = COPY [[COPY1]]
; AVX512VL: RET 0, implicit $rax
%0(s64) = COPY $rdi
%1(p0) = COPY $rsi
G_STORE %0(s64), %1(p0) :: (store unordered (s64) into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
...
---
# Float store: the incoming $xmm0 value is moved from the vector bank to the
# gpr bank (fr32 -> gr32) so the unordered atomic store selects to MOV32mr.
name: test_store_float
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float
; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
%2:vecr(s128) = COPY $xmm0
%0:vecr(s32) = G_TRUNC %2(s128)
%1:gpr(p0) = COPY $rdi
%3:gpr(s32) = COPY %0(s32)
G_STORE %3(s32), %1(p0) :: (store unordered (s32) into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
...
---
# Same as test_store_float; kept as a separate *_vec case. Selection is
# identical: cross-bank copy to gr32, then MOV32mr.
name: test_store_float_vec
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float_vec
; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; SSE: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float_vec
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; AVX: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float_vec
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; AVX512F: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float_vec
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY3:%[0-9]+]]:gr32 = COPY [[COPY1]]
; AVX512VL: MOV32mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s32) into %ir.p1)
; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
%2:vecr(s128) = COPY $xmm0
%0:vecr(s32) = G_TRUNC %2(s128)
%1:gpr(p0) = COPY $rdi
%3:gpr(s32) = COPY %0(s32)
G_STORE %3(s32), %1(p0) :: (store unordered (s32) into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
...
---
# Double store: $xmm0 value is copied to the gpr bank (fr64 -> gr64) so the
# unordered atomic store selects to MOV64mr.
name: test_store_double
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
# NOTE(review): the NO_AVX512X prefix below appears in no RUN line, so this
# check line is never exercised — looks like a stale leftover; confirm and
# consider removing it on the next regeneration.
# NO_AVX512X: %0:fr64 = COPY $xmm0
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double
; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
%2:vecr(s128) = COPY $xmm0
%0:vecr(s64) = G_TRUNC %2(s128)
%1:gpr(p0) = COPY $rdi
%3:gpr(s64) = COPY %0(s64)
G_STORE %3(s64), %1(p0) :: (store unordered (s64) into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
...
---
# Same as test_store_double; kept as a separate *_vec case. Selection is
# identical: cross-bank copy to gr64, then MOV64mr.
name: test_store_double_vec
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: vecr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: vecr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double_vec
; SSE: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; SSE: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; SSE: $rax = COPY [[COPY2]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double_vec
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY [[COPY]]
; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; AVX: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; AVX: $rax = COPY [[COPY2]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double_vec
; AVX512F: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; AVX512F: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; AVX512F: $rax = COPY [[COPY2]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double_vec
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY [[COPY]]
; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY3:%[0-9]+]]:gr64 = COPY [[COPY1]]
; AVX512VL: MOV64mr [[COPY2]], 1, $noreg, 0, $noreg, [[COPY3]] :: (store unordered (s64) into %ir.p1)
; AVX512VL: $rax = COPY [[COPY2]]
; AVX512VL: RET 0, implicit $rax
%2:vecr(s128) = COPY $xmm0
%0:vecr(s64) = G_TRUNC %2(s128)
%1:gpr(p0) = COPY $rdi
%3:gpr(s64) = COPY %0(s64)
G_STORE %3(s64), %1(p0) :: (store unordered (s64) into %ir.p1)
$rax = COPY %1(p0)
RET 0, implicit $rax
...
---
# Unordered atomic pointer (p0) load selects to MOV64rm, same as s64.
name: test_load_ptr
alignment: 16
legalized: true
regBankSelected: true
selected: false
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi
; SSE-LABEL: name: test_load_ptr
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
; SSE: $rax = COPY [[MOV64rm]]
; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_load_ptr
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
; AVX: $rax = COPY [[MOV64rm]]
; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_load_ptr
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
; AVX512F: $rax = COPY [[MOV64rm]]
; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_load_ptr
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load unordered (p0) from %ir.ptr1)
; AVX512VL: $rax = COPY [[MOV64rm]]
; AVX512VL: RET 0, implicit $rax
%0(p0) = COPY $rdi
%1(p0) = G_LOAD %0(p0) :: (load unordered (p0) from %ir.ptr1)
$rax = COPY %1(p0)
RET 0, implicit $rax
...
---
# Unordered atomic pointer (p0) store selects to MOV64mr, same as s64.
name: test_store_ptr
alignment: 16
legalized: true
regBankSelected: true
selected: false
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $rsi
; SSE-LABEL: name: test_store_ptr
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
; SSE: RET 0
; AVX-LABEL: name: test_store_ptr
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
; AVX: RET 0
; AVX512F-LABEL: name: test_store_ptr
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
; AVX512F: RET 0
; AVX512VL-LABEL: name: test_store_ptr
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (p0) into %ir.ptr1)
; AVX512VL: RET 0
%0(p0) = COPY $rdi
%1(p0) = COPY $rsi
G_STORE %1(p0), %0(p0) :: (store unordered (p0) into %ir.ptr1)
RET 0
...
---
# A small G_PTR_ADD offset (20 = index 5 * sizeof(i32)) is folded into the
# displacement field of the MOV32mr/MOV32rm addressing mode, even though the
# accesses are unordered-atomic.
name: test_gep_folding
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
- { id: 3, class: gpr }
- { id: 4, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $esi, $rdi
; SSE-LABEL: name: test_gep_folding
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
; SSE: $eax = COPY [[MOV32rm]]
; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_gep_folding
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
; AVX: $eax = COPY [[MOV32rm]]
; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_gep_folding
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
; AVX512F: $eax = COPY [[MOV32rm]]
; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_gep_folding
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load unordered (s32) from %ir.arrayidx)
; AVX512VL: $eax = COPY [[MOV32rm]]
; AVX512VL: RET 0, implicit $eax
%0(p0) = COPY $rdi
%1(s32) = COPY $esi
%2(s64) = G_CONSTANT i64 20
%3(p0) = G_PTR_ADD %0, %2(s64)
G_STORE %1(s32), %3(p0) :: (store unordered (s32) into %ir.arrayidx)
%4(s32) = G_LOAD %3(p0) :: (load unordered (s32) from %ir.arrayidx)
$eax = COPY %4(s32)
RET 0, implicit $eax
...
---
# An offset too large for a 32-bit displacement (228719476720 = index
# 57179869180 * sizeof(i32)) cannot be folded: it is materialized with
# MOV64ri, combined via LEA64r, and the accesses use displacement 0.
name: test_gep_folding_largeGepIndex
alignment: 16
legalized: true
regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
- { id: 3, class: gpr }
- { id: 4, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $esi, $rdi
; SSE-LABEL: name: test_gep_folding_largeGepIndex
; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; SSE: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
; SSE: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
; SSE: $eax = COPY [[MOV32rm]]
; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_gep_folding_largeGepIndex
; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
; AVX: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
; AVX: $eax = COPY [[MOV32rm]]
; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_gep_folding_largeGepIndex
; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512F: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
; AVX512F: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
; AVX512F: $eax = COPY [[MOV32rm]]
; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_gep_folding_largeGepIndex
; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512VL: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
; AVX512VL: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store unordered (s32) into %ir.arrayidx)
; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load unordered (s32) from %ir.arrayidx)
; AVX512VL: $eax = COPY [[MOV32rm]]
; AVX512VL: RET 0, implicit $eax
%0(p0) = COPY $rdi
%1(s32) = COPY $esi
%2(s64) = G_CONSTANT i64 228719476720
%3(p0) = G_PTR_ADD %0, %2(s64)
G_STORE %1(s32), %3(p0) :: (store unordered (s32) into %ir.arrayidx)
%4(s32) = G_LOAD %3(p0) :: (load unordered (s32) from %ir.arrayidx)
$eax = COPY %4(s32)
RET 0, implicit $eax
...