llvm/test/Transforms/InstCombine/lshr-ashr-of-uscmp.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

declare void @use(i8 %val)

; ucmp/scmp(x, y) >> N folds to zext(x < y) for lshr and to sext(x < y) for ashr
; when N is one less than the bit width of the ucmp/scmp result.
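; The intrinsics return -1, 0, or 1, so shifting the i8 result right by 7
; isolates the sign bit, which is set exactly when x < y (unsigned for ucmp,
; signed for scmp).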
define i8 @ucmp_to_zext(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @ucmp_to_zext(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X]], [[Y]]
; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
  %2 = lshr i8 %1, 7
  ret i8 %2
}

define i8 @ucmp_to_sext(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @ucmp_to_sext(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X]], [[Y]]
; CHECK-NEXT:    [[TMP2:%.*]] = sext i1 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
  %2 = ashr i8 %1, 7
  ret i8 %2
}

define i8 @scmp_to_zext(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @scmp_to_zext(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[X]], [[Y]]
; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
  %2 = lshr i8 %1, 7
  ret i8 %2
}

define i8 @scmp_to_sext(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @scmp_to_sext(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[X]], [[Y]]
; CHECK-NEXT:    [[TMP2:%.*]] = sext i1 [[TMP1]] to i8
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
  %2 = ashr i8 %1, 7
  ret i8 %2
}

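; The fold also applies elementwise to vector operands.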
define <4 x i8> @scmp_to_sext_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: define <4 x i8> @scmp_to_sext_vec(
; CHECK-SAME: <4 x i32> [[X:%.*]], <4 x i32> [[Y:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <4 x i32> [[X]], [[Y]]
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i8>
; CHECK-NEXT:    ret <4 x i8> [[TMP2]]
;
  %1 = call <4 x i8> @llvm.scmp(<4 x i32> %x, <4 x i32> %y)
  %2 = ashr <4 x i8> %1, <i8 7, i8 7, i8 7, i8 7>
  ret <4 x i8> %2
}

; Negative test: incorrect shift amount
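; (only a shift by 7, the sign-bit position of the i8 result, qualifies)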
define i8 @ucmp_to_zext_neg1(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @ucmp_to_zext_neg1(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i8 [[TMP1]], 5
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
  %2 = lshr i8 %1, 5
  ret i8 %2
}

; Negative test: shift amount is not a constant
define i8 @ucmp_to_zext_neg2(i32 %x, i32 %y, i8 %s) {
; CHECK-LABEL: define i8 @ucmp_to_zext_neg2(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], i8 [[S:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i8 [[TMP1]], [[S]]
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
  %2 = lshr i8 %1, %s
  ret i8 %2
}

; Negative test: the result of ucmp/scmp is used more than once
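; (presumably the one-use restriction avoids creating an icmp + ext while the cmp stays live)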
define i8 @ucmp_to_zext_neg3(i32 %x, i32 %y) {
; CHECK-LABEL: define i8 @ucmp_to_zext_neg3(
; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
; CHECK-NEXT:    call void @use(i8 [[TMP1]])
; CHECK-NEXT:    [[TMP2:%.*]] = lshr i8 [[TMP1]], 7
; CHECK-NEXT:    ret i8 [[TMP2]]
;
  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
  call void @use(i8 %1)
  %2 = lshr i8 %1, 7
  ret i8 %2
}