; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
;
; Exercise folding of memcmp calls with constant arrays that contain both
; negative and positive characters, using both constant and nonconstant sizes.
declare i32 @memcmp(ptr, ptr, i64)
@a = constant [7 x i8] c"abcdef\7f"
@b = constant [7 x i8] c"abcdef\80"
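; The folds below rely on memcmp comparing bytes as unsigned char: the final
; byte 0x7f in @a compares less than the final byte 0x80 in @b even though
; '\80' is negative as a signed char.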
; Exercise folding of memcmp(A + C, B + C, N) calls with constant sizes on
; small arrays that differ only in a character of opposite sign.
define void @fold_memcmp_cst_cst(ptr %pcmp) {
; CHECK-LABEL: @fold_memcmp_cst_cst(
; CHECK-NEXT: store i32 -1, ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[SB5_A5:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; CHECK-NEXT: store i32 1, ptr [[SB5_A5]], align 4
; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i8, ptr [[PCMP]], i64 8
; CHECK-NEXT: store i32 -1, ptr [[SA6_B6]], align 4
; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i8, ptr [[PCMP]], i64 12
; CHECK-NEXT: store i32 1, ptr [[SB6_A6]], align 4
; CHECK-NEXT: ret void
;
%p5 = getelementptr [7 x i8], ptr @a, i64 0, i64 5
%p6 = getelementptr [7 x i8], ptr @a, i64 0, i64 6
%q5 = getelementptr [7 x i8], ptr @b, i64 0, i64 5
%q6 = getelementptr [7 x i8], ptr @b, i64 0, i64 6
; Fold memcmp(a + 5, b + 5, 2) to -1.
%ca5_b5 = call i32 @memcmp(ptr %p5, ptr %q5, i64 2)
store i32 %ca5_b5, ptr %pcmp
; Fold memcmp(b + 5, a + 5, 2) to +1.
%cb5_a5 = call i32 @memcmp(ptr %q5, ptr %p5, i64 2)
%sb5_a5 = getelementptr i32, ptr %pcmp, i64 1
store i32 %cb5_a5, ptr %sb5_a5
; Fold memcmp(a + 6, b + 6, 1) to -1.
%ca6_b6 = call i32 @memcmp(ptr %p6, ptr %q6, i64 1)
%sa6_b6 = getelementptr i32, ptr %pcmp, i64 2
store i32 %ca6_b6, ptr %sa6_b6
; Fold memcmp(b + 6, a + 6, 1) to +1.
%cb6_a6 = call i32 @memcmp(ptr %q6, ptr %p6, i64 1)
%sb6_a6 = getelementptr i32, ptr %pcmp, i64 3
store i32 %cb6_a6, ptr %sb6_a6
ret void
}
; Exercise folding of memcmp(A, B, N) calls with a variable size on arrays
; that differ only in a character of opposite sign.
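; Because @a and @b agree in their first six bytes, each variable-size call
; folds to a sign or zero extension of an icmp on %n that tests whether the
; comparison reaches the differing byte, as the CHECK lines show.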
define void @fold_memcmp_cst_var(ptr %pcmp, i64 %n) {
; CHECK-LABEL: @fold_memcmp_cst_var(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[N:%.*]], 6
; CHECK-NEXT: [[CA0_B0:%.*]] = sext i1 [[TMP1]] to i32
; CHECK-NEXT: store i32 [[CA0_B0]], ptr [[PCMP:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[N]], 6
; CHECK-NEXT: [[CB0_A0:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT: [[SB0_A0:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; CHECK-NEXT: store i32 [[CB0_A0]], ptr [[SB0_A0]], align 4
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[CA6_B6:%.*]] = sext i1 [[TMP3]] to i32
; CHECK-NEXT: [[SA6_B6:%.*]] = getelementptr i8, ptr [[PCMP]], i64 8
; CHECK-NEXT: store i32 [[CA6_B6]], ptr [[SA6_B6]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i64 [[N]], 0
; CHECK-NEXT: [[CB6_A6:%.*]] = zext i1 [[TMP4]] to i32
; CHECK-NEXT: [[SB6_A6:%.*]] = getelementptr i8, ptr [[PCMP]], i64 12
; CHECK-NEXT: store i32 [[CB6_A6]], ptr [[SB6_A6]], align 4
; CHECK-NEXT: ret void
;
%p6 = getelementptr [7 x i8], ptr @a, i64 0, i64 6
%q6 = getelementptr [7 x i8], ptr @b, i64 0, i64 6
; Fold memcmp(a, b, n) to n > 6 ? -1 : 0.
%ca0_b0 = call i32 @memcmp(ptr @a, ptr @b, i64 %n)
store i32 %ca0_b0, ptr %pcmp
; Fold memcmp(b, a, n) to n > 6 ? +1 : 0.
%cb0_a0 = call i32 @memcmp(ptr @b, ptr @a, i64 %n)
%sb0_a0 = getelementptr i32, ptr %pcmp, i64 1
store i32 %cb0_a0, ptr %sb0_a0
; Fold memcmp(a + 6, b + 6, n) to n != 0 ? -1 : 0.
%ca6_b6 = call i32 @memcmp(ptr %p6, ptr %q6, i64 %n)
%sa6_b6 = getelementptr i32, ptr %pcmp, i64 2
store i32 %ca6_b6, ptr %sa6_b6
; Fold memcmp(b + 6, a + 6, n) to n != 0 ? +1 : 0.
%cb6_a6 = call i32 @memcmp(ptr %q6, ptr %p6, i64 %n)
%sb6_a6 = getelementptr i32, ptr %pcmp, i64 3
store i32 %cb6_a6, ptr %sb6_a6
ret void
}