This patch canonicalizes getelementptr instructions with constant indices to use the `i8` source element type. This makes it easier for optimizations to recognize that two GEPs are identical, because they don't need to see past the many different ways of expressing the same offset.

This is a first step towards https://discourse.llvm.org/t/rfc-replacing-getelementptr-with-ptradd/68699. It is limited to constant GEPs for now, as they have a clear canonical form, while we're not yet sure how exactly to deal with variable indices.

The test llvm/test/Transforms/PhaseOrdering/switch_with_geps.ll gives two representative examples of the kind of optimization improvement we expect from this change. In the first test, SimplifyCFG can now realize that all switch branches are actually the same. In the second test, it can convert the switch into simple arithmetic. Both patterns are representative of common optimization failures we see in Rust.

Fixes https://github.com/llvm/llvm-project/issues/69841.
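As an illustration (a sketch of the intended canonical form, not lifted verbatim from the patch): a constant GEP such as

  %p = getelementptr i32, ptr %base, i64 1

which advances %base by 4 bytes, is rewritten into the equivalent byte-offset form

  %p = getelementptr i8, ptr %base, i64 4

so that every constant GEP computing the same byte offset is spelled identically. The memcmp test below shows this in action: the input IR uses `getelementptr i32, ptr %pcmp, i64 1`, and the updated CHECK lines expect `getelementptr i8, ptr [[PCMP]], i64 4`.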
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; Verify that calls to memcmp with counts in excess of the array sizes are
; either folded gracefully or expanded to library calls.
;
; RUN: opt < %s -passes=instcombine -S -data-layout="E" | FileCheck %s --check-prefixes=BE
; RUN: opt < %s -passes=instcombine -S -data-layout="e" | FileCheck %s --check-prefixes=LE

declare i32 @memcmp(ptr, ptr, i64)

@ia16a = constant [4 x i16] [i16 24930, i16 25444, i16 25958, i16 26472]
@ia16b = constant [5 x i16] [i16 24930, i16 25444, i16 25958, i16 26472, i16 26992]
@ia16c = constant [6 x i16] [i16 24930, i16 25444, i16 25958, i16 26472, i16 26993, i16 29042]
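
; Note: @ia16a is 8 bytes, @ia16b is 10 bytes, and @ia16c is 12 bytes; the
; counts used in the calls below deliberately read past the end of the
; smaller array.
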
; Fold calls with a count in excess of the size of one of the arrays that
; differ.  They're strictly undefined but folding the result to the expected
; value (analogous to strncmp) is safer than letting a SIMD library
; implementation return a bogus value.
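; Note: @ia16b and @ia16c first differ in element 4 (i16 26992 = 0x6970 vs
; i16 26993 = 0x6971), which compares less-than under both big- and
; little-endian byte order, so both layouts fold to the same -1/+1 results.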
define void @fold_memcmp_mismatch_too_big(ptr %pcmp) {
; BE-LABEL: @fold_memcmp_mismatch_too_big(
; BE-NEXT:    store i32 -1, ptr [[PCMP:%.*]], align 4
; BE-NEXT:    [[PSTOR_CB:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; BE-NEXT:    store i32 1, ptr [[PSTOR_CB]], align 4
; BE-NEXT:    ret void
;
; LE-LABEL: @fold_memcmp_mismatch_too_big(
; LE-NEXT:    store i32 -1, ptr [[PCMP:%.*]], align 4
; LE-NEXT:    [[PSTOR_CB:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; LE-NEXT:    store i32 1, ptr [[PSTOR_CB]], align 4
; LE-NEXT:    ret void
;
  %cmp_bc = call i32 @memcmp(ptr @ia16b, ptr @ia16c, i64 12)
  store i32 %cmp_bc, ptr %pcmp

  %cmp_cb = call i32 @memcmp(ptr @ia16c, ptr @ia16b, i64 12)
  %pstor_cb = getelementptr i32, ptr %pcmp, i64 1
  store i32 %cmp_cb, ptr %pstor_cb

  ret void
}

; Fold even calls with excessive byte counts of arrays with matching bytes.
; Like in the instances above, this is preferable to letting the undefined
; calls take place, although it does prevent sanitizers from detecting them.
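; Note: the 8 bytes of @ia16a are an exact prefix of @ia16b, so all bytes
; present in both arrays match and both calls fold to 0, even with counts
; of 9 and -1 (i.e. SIZE_MAX).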
define void @fold_memcmp_match_too_big(ptr %pcmp) {
; BE-LABEL: @fold_memcmp_match_too_big(
; BE-NEXT:    store i32 0, ptr [[PCMP:%.*]], align 4
; BE-NEXT:    [[PSTOR_AB_M1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; BE-NEXT:    store i32 0, ptr [[PSTOR_AB_M1]], align 4
; BE-NEXT:    ret void
;
; LE-LABEL: @fold_memcmp_match_too_big(
; LE-NEXT:    store i32 0, ptr [[PCMP:%.*]], align 4
; LE-NEXT:    [[PSTOR_AB_M1:%.*]] = getelementptr i8, ptr [[PCMP]], i64 4
; LE-NEXT:    store i32 0, ptr [[PSTOR_AB_M1]], align 4
; LE-NEXT:    ret void
;
  %cmp_ab_9 = call i32 @memcmp(ptr @ia16a, ptr @ia16b, i64 9)
  store i32 %cmp_ab_9, ptr %pcmp

  %cmp_ab_m1 = call i32 @memcmp(ptr @ia16a, ptr @ia16b, i64 -1)
  %pstor_ab_m1 = getelementptr i32, ptr %pcmp, i64 1
  store i32 %cmp_ab_m1, ptr %pstor_ab_m1

  ret void
}