; llvm/test/CodeGen/Hexagon/autohvx/qmul-chop.ll

; RUN: llc -march=hexagon < %s -verify-machineinstrs | FileCheck %s

; Check that the code is not scalarized, i.e. that no scalar multiplications
; are generated.
; CHECK-NOT: mpyu
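
; The IR below implements a quantized-multiply pattern (hence "qmul-chop"):
; each byte is zero-extended, multiplied by a 32-bit fixed-point constant with
; rounding ((x * 1698967818 + 2^30) >> 31), clamped to [0, 255], and narrowed
; back to bytes. AutoHVX should keep this sequence in HVX vector registers.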

target datalayout = "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32:32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32:32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048:2048:2048"
target triple = "hexagon"

define void @f0(i32 %a0) #0 {
b0:
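  ; Scalar prologue: compute the byte offset of the store below,
  ; min(((x + 3135) / x) * a0, 3136) << 7.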
  %v0 = load i32, ptr poison, align 4
  %v1 = add nsw i32 %v0, 3135
  %v2 = sdiv i32 %v1, %v0
  %v3 = mul nsw i32 %v2, %a0
  %v4 = tail call i32 @llvm.smin.i32(i32 %v3, i32 3136)
  %v5 = shl nsw i32 %v4, 7
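  ; Widen each byte to i64 and apply a rounding fixed-point multiply:
  ; (zext(x) * 1698967818 + 2^30) >> 31, then truncate to i32.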
  %v6 = load <128 x i8>, ptr poison, align 64
  %v7 = zext <128 x i8> %v6 to <128 x i64>
  %v8 = mul nuw nsw <128 x i64> %v7, <i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818, i64 1698967818>
  %v9 = add nuw nsw <128 x i64> %v8, <i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824, i64 1073741824>
  %v10 = lshr <128 x i64> %v9, <i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31, i64 31>
  %v11 = trunc <128 x i64> %v10 to <128 x i32>
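  ; Clamp to [0, 255] and narrow to i8, i.e. a saturating truncation.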
  %v12 = add nsw <128 x i32> zeroinitializer, %v11
  %v13 = tail call <128 x i32> @llvm.smin.v128i32(<128 x i32> %v12, <128 x i32> <i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255, i32 255>)
  %v14 = tail call <128 x i32> @llvm.smax.v128i32(<128 x i32> %v13, <128 x i32> zeroinitializer)
  %v15 = trunc <128 x i32> %v14 to <128 x i8>
  %v16 = getelementptr inbounds i8, ptr null, i32 %v5
  store <128 x i8> %v15, ptr %v16, align 64
  ret void
}

; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
declare i32 @llvm.smin.i32(i32, i32) #1

; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
declare <128 x i32> @llvm.smin.v128i32(<128 x i32>, <128 x i32>) #1

; Function Attrs: nocallback nofree nosync nounwind readnone speculatable willreturn
declare <128 x i32> @llvm.smax.v128i32(<128 x i32>, <128 x i32>) #1

attributes #0 = { "target-features"="+v68,+hvxv68,+hvx-length128b,+hvx-qfloat,-hvx-ieee-fp" }
attributes #1 = { nocallback nofree nosync nounwind readnone speculatable willreturn }