llvm/test/Instrumentation/MemorySanitizer/vector-load-store.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s --implicit-check-not="call void @__msan_warning"
; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ADDR --implicit-check-not="call void @__msan_warning"
; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ORIGINS --implicit-check-not="call void @__msan_warning"
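; The RUN lines cover three configurations: default instrumentation with
; address checking disabled (CHECK), checking of the pointer operand's shadow
; enabled (ADDR), and origin tracking (ORIGINS). --implicit-check-not verifies
; that no unexpected __msan_warning calls are inserted.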

target triple = "x86_64-unknown-linux-gnu"
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
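
; The magic constants in the checks below come from the default x86_64 Linux
; MemorySanitizer mapping:
;   shadow address = application address xor 0x500000000000 (87960930222080)
;   origin address = shadow address + 0x100000000000 (17592186044416)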

define void @load.v1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <1 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0:![0-9]+]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3:[0-9]+]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <1 x i32>, ptr [[P:%.*]], align 4
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <1 x i32>, ptr [[TMP7]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <1 x i32>, ptr [[P:%.*]], align 4
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <1 x i32>, ptr [[TMP4]], align 4
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
; ORIGINS-NEXT:    ret void
;
  load <1 x i32>, ptr %p
  ret void
}

define void @load.v2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[P:%.*]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <2 x i32>, ptr [[P:%.*]], align 8
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <2 x i32>, ptr [[TMP7]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[P:%.*]], align 8
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <2 x i32>, ptr [[TMP4]], align 8
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGINS-NEXT:    ret void
;
  load <2 x i32>, ptr %p
  ret void
}

define void @load.v4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP7]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P:%.*]], align 16
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 16
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGINS-NEXT:    ret void
;
  load <4 x i32>, ptr %p
  ret void
}

define void @load.v8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 32
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP7]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <8 x i32>, ptr [[P:%.*]], align 32
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <8 x i32>, ptr [[TMP4]], align 32
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 32
; ORIGINS-NEXT:    ret void
;
  load <8 x i32>, ptr %p
  ret void
}

define void @load.v16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.v16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP4]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.v16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <16 x i32>, ptr [[P:%.*]], align 64
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP7]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.v16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[P:%.*]], align 64
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <16 x i32>, ptr [[TMP4]], align 64
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 64
; ORIGINS-NEXT:    ret void
;
  load <16 x i32>, ptr %p
  ret void
}
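
; For stores, the shadow of the stored value is written to the shadow address
; before the application store. In these tests the stored value is
; zeroinitializer, so its shadow is a constant zero vector and no origin store
; is emitted for the fixed-width cases even with -msan-track-origins.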


define void @store.v1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; CHECK-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <1 x i32> zeroinitializer, ptr [[TMP6]], align 4
; ADDR-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; ORIGINS-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
; ORIGINS-NEXT:    ret void
;
  store <1 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP6]], align 8
; ADDR-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; ORIGINS-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
; ORIGINS-NEXT:    ret void
;
  store <2 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP6]], align 16
; ADDR-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; ORIGINS-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
; ORIGINS-NEXT:    ret void
;
  store <4 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP6]], align 32
; ADDR-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; ORIGINS-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
; ORIGINS-NEXT:    ret void
;
  store <8 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.v16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.v16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.v16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP6]], align 64
; ADDR-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.v16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; ORIGINS-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
; ORIGINS-NEXT:    ret void
;
  store <16 x i32> zeroinitializer, ptr %p
  ret void
}
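
; The same load and store patterns are repeated below for scalable vectors
; (<vscale x N x i32>); the shadow loads and stores use the matching scalable
; vector type.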

define void @load.nxv1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 1 x i32>, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP4]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 1 x i32>, ptr [[P:%.*]], align 4
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP7]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 1 x i32>, ptr [[P:%.*]], align 4
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 1 x i32>, ptr [[TMP4]], align 4
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
; ORIGINS-NEXT:    ret void
;
  load <vscale x 1 x i32>, ptr %p
  ret void
}

define void @load.nxv2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[P:%.*]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 2 x i32>, ptr [[P:%.*]], align 8
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP7]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 2 x i32>, ptr [[P:%.*]], align 8
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 2 x i32>, ptr [[TMP4]], align 8
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 8
; ORIGINS-NEXT:    ret void
;
  load <vscale x 2 x i32>, ptr %p
  ret void
}

define void @load.nxv4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 16
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 16
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP7]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 4 x i32>, ptr [[P:%.*]], align 16
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP4]], align 16
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 16
; ORIGINS-NEXT:    ret void
;
  load <vscale x 4 x i32>, ptr %p
  ret void
}

define void @load.nxv8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i32>, ptr [[P:%.*]], align 32
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i32>, ptr [[TMP4]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 8 x i32>, ptr [[P:%.*]], align 32
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i32>, ptr [[TMP7]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 8 x i32>, ptr [[P:%.*]], align 32
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 8 x i32>, ptr [[TMP4]], align 32
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 32
; ORIGINS-NEXT:    ret void
;
  load <vscale x 8 x i32>, ptr %p
  ret void
}

define void @load.nxv16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @load.nxv16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i32>, ptr [[P:%.*]], align 64
; CHECK-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; CHECK-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; CHECK-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; CHECK-NEXT:    [[_MSLD:%.*]] = load <vscale x 16 x i32>, ptr [[TMP4]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @load.nxv16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = load <vscale x 16 x i32>, ptr [[P:%.*]], align 64
; ADDR-NEXT:    [[TMP5:%.*]] = ptrtoint ptr [[P]] to i64
; ADDR-NEXT:    [[TMP6:%.*]] = xor i64 [[TMP5]], 87960930222080
; ADDR-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to ptr
; ADDR-NEXT:    [[_MSLD:%.*]] = load <vscale x 16 x i32>, ptr [[TMP7]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @load.nxv16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = load <vscale x 16 x i32>, ptr [[P:%.*]], align 64
; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint ptr [[P]] to i64
; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to ptr
; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
; ORIGINS-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ORIGINS-NEXT:    [[_MSLD:%.*]] = load <vscale x 16 x i32>, ptr [[TMP4]], align 64
; ORIGINS-NEXT:    [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 64
; ORIGINS-NEXT:    ret void
;
  load <vscale x 16 x i32>, ptr %p
  ret void
}
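
; For scalable-vector stores with origin tracking, the number of origin slots
; is not known at compile time, so the instrumentation checks the shadow with
; llvm.vector.reduce.or and, if any bit is set, runs a vscale-based loop that
; writes the origin value into each 4-byte origin slot.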


define void @store.nxv1i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv1i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; CHECK-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv1i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[TMP6]], align 4
; ADDR-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv1i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[TMP3]], align 4
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv1i32(<vscale x 1 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0:![0-9]+]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 4
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
; ORIGINS-NEXT:    ret void
;
  store <vscale x 1 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv2i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv2i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv2i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP6]], align 8
; ADDR-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv2i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[TMP3]], align 8
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 8
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
; ORIGINS-NEXT:    ret void
;
  store <vscale x 2 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv4i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv4i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv4i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[TMP6]], align 16
; ADDR-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv4i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[TMP3]], align 16
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 16
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
; ORIGINS-NEXT:    ret void
;
  store <vscale x 4 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv8i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv8i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv8i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[TMP6]], align 32
; ADDR-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv8i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[TMP3]], align 32
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv8i32(<vscale x 8 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 32
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
; ORIGINS-NEXT:    ret void
;
  store <vscale x 8 x i32> zeroinitializer, ptr %p
  ret void
}

define void @store.nxv16i32(ptr %p) sanitize_memory {
; CHECK-LABEL: @store.nxv16i32(
; CHECK-NEXT:    call void @llvm.donothing()
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; CHECK-NEXT:    ret void
;
; ADDR-LABEL: @store.nxv16i32(
; ADDR-NEXT:    [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
; ADDR-NEXT:    call void @llvm.donothing()
; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
; ADDR-NEXT:    br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0]]
; ADDR:       2:
; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR3]]
; ADDR-NEXT:    unreachable
; ADDR:       3:
; ADDR-NEXT:    [[TMP4:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ADDR-NEXT:    [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
; ADDR-NEXT:    [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
; ADDR-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[TMP6]], align 64
; ADDR-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; ADDR-NEXT:    ret void
;
; ORIGINS-LABEL: @store.nxv16i32(
; ORIGINS-NEXT:    call void @llvm.donothing()
; ORIGINS-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; ORIGINS-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
; ORIGINS-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to ptr
; ORIGINS-NEXT:    [[TMP4:%.*]] = add i64 [[TMP2]], 17592186044416
; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to ptr
; ORIGINS-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[TMP3]], align 64
; ORIGINS-NEXT:    [[TMP6:%.*]] = call i32 @llvm.vector.reduce.or.nxv16i32(<vscale x 16 x i32> zeroinitializer)
; ORIGINS-NEXT:    [[_MSCMP:%.*]] = icmp ne i32 [[TMP6]], 0
; ORIGINS-NEXT:    br i1 [[_MSCMP]], label [[TMP7:%.*]], label [[TMP13:%.*]], !prof [[PROF0]]
; ORIGINS:       7:
; ORIGINS-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; ORIGINS-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP8]], 64
; ORIGINS-NEXT:    [[TMP10:%.*]] = add i64 [[TMP9]], 3
; ORIGINS-NEXT:    [[TMP11:%.*]] = udiv i64 [[TMP10]], 4
; ORIGINS-NEXT:    br label [[DOTSPLIT:%.*]]
; ORIGINS:       .split:
; ORIGINS-NEXT:    [[IV:%.*]] = phi i64 [ 0, [[TMP7]] ], [ [[IV_NEXT:%.*]], [[DOTSPLIT]] ]
; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, ptr [[TMP5]], i64 [[IV]]
; ORIGINS-NEXT:    store i32 0, ptr [[TMP12]], align 4
; ORIGINS-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; ORIGINS-NEXT:    [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP11]]
; ORIGINS-NEXT:    br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
; ORIGINS:       .split.split:
; ORIGINS-NEXT:    br label [[TMP13]]
; ORIGINS:       13:
; ORIGINS-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
; ORIGINS-NEXT:    ret void
;
  store <vscale x 16 x i32> zeroinitializer, ptr %p
  ret void
}