clang-p2996/llvm/test/CodeGen/X86/shift-folding.ll
Bjorn Pettersson 46cacdbb21 [DAGCombiner] Adjust some checks in DAGCombiner::reduceLoadWidth
In code review for D117104 two slightly weird checks were found
in DAGCombiner::reduceLoadWidth. They were typically checking
whether BitsA was a multiple of BitsB by looking at
(BitsA & (BitsB - 1)), but such a comparison only makes sense if
BitsB is a power of two.
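
To see why, here is a minimal standalone sketch (a hypothetical helper,
not LLVM code) comparing the mask trick against a real modulo:

  #include <cassert>

  // Mirrors the pattern the old checks used. This agrees with
  // BitsA % BitsB == 0 only when BitsB is a power of two.
  static bool isMultipleMask(unsigned BitsA, unsigned BitsB) {
    return (BitsA & (BitsB - 1)) == 0;
  }

  int main() {
    // Power-of-two divisor: mask trick and modulo agree.
    assert(isMultipleMask(128, 64) && 128 % 64 == 0);
    assert(!isMultipleMask(96, 64) && 96 % 64 != 0);
    // Non-power-of-two divisor: 48 is a multiple of 24, but
    // 48 & 23 == 16, so the mask trick wrongly answers "no".
    assert(48 % 24 == 0 && !isMultipleMask(48, 24));
    return 0;
  }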

The checks were related to the code that attempted to shrink a load
based on the fact that the loaded value would be right shifted.

Afaict the legality of the value types is checked later (typically in
isLegalNarrowLdSt), so the existing checks were both overly
conservative and wrong whenever ExtVTBits wasn't a power of two.
The latter situation was triggered by a number of lit tests, so we
could not simply assert on ExtVTBits being a power of two.

When attempting to simply remove the checks I found some problems
that seem to have been guarded by the checks (maybe just by luck).
A typical example would be a pattern like this:

  t1 = load i96* ptr
  t2 = srl t1, 64
  t3 = truncate t2 to i64

When DAGCombine is visiting the truncate, reduceLoadWidth is called
to attempt narrowing the load to 64 bits (ExtVT := MVT::i64). Then
the SRL is detected and we set ShAmt to 64.

In the past we bailed out due to i96 not being a multiple of 64.
If we simply removed that check, we would end up replacing the
load with a new load that reads 64 bits, but with a base pointer
adjusted by 64 bits. We would then read 32 bits that weren't
accessed by the original load.
This patch will instead utilize the fact that the logical right shift
can be folded away by using a zextload. Thus, the pattern above will
now be combined into

  t3 = load i32* ptr+offset, zext to i64
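
Written out as a complete function, the first pattern above looks like
this (a hypothetical reproducer for illustration, not one of the
existing lit tests):

  define i64 @srl_i96_load(i96* %p) {
    %v = load i96, i96* %p
    %s = lshr i96 %v, 64
    %t = trunc i96 %s to i64
    ret i64 %t
  }

With the patch, the load becomes a 32-bit load at byte offset 8 (on
little-endian), zero-extended to i64, rather than being left alone.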


Another case is shown in the X86/shift-folding.ll test (shown below):

  t1 = load i32* ptr
  t2 = srl i32 t1, 8
  t3 = truncate t2 to i16

In the past we bailed out due to the shift count (8) not being a
multiple of 16. Now the narrowing kicks in and we get

  t3 = load i16* ptr+offset
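
The overall decision described above can be summarized in a small
standalone sketch (simplified pseudo-logic with made-up names, not the
actual DAGCombiner code):

  // LoadBits: size of the original load      (96 and 32 above)
  // ShAmt:    the srl amount                 (64 and 8)
  // ExtBits:  size requested by the truncate (64 and 16)
  bool canNarrow(unsigned LoadBits, unsigned ShAmt, unsigned ExtBits,
                 bool &NeedsZExt) {
    if (ShAmt + ExtBits <= LoadBits) {
      // All requested bits exist in the original load: plain narrow
      // load at ptr + ShAmt/8 on little-endian (the i32 example).
      NeedsZExt = false;
      return true;
    }
    if (ShAmt < LoadBits) {
      // Only LoadBits - ShAmt bits remain; load those and zero-extend
      // (the i96 example's 32-bit zextload).
      NeedsZExt = true;
      return true;
    }
    return false; // everything was shifted out
  }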

Differential Revision: https://reviews.llvm.org/D117406
2022-01-24 12:22:04 +01:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -verify-coalescing | FileCheck %s
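
; The lshr by 2 cancels against the 4-byte GEP scale: (X >> 2) * 4 == X & -4.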
define i32* @test1(i32* %P, i32 %X) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    andl $-4, %eax
; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    retl
  %Y = lshr i32 %X, 2
  %gep.upgrd.1 = zext i32 %Y to i64
  %P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.1
  ret i32* %P2
}
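
; The shl by 2 combines with the 4-byte GEP scale into a single shift by 4.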
define i32* @test2(i32* %P, i32 %X) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    shll $4, %eax
; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    retl
  %Y = shl i32 %X, 2
  %gep.upgrd.2 = zext i32 %Y to i64
  %P2 = getelementptr i32, i32* %P, i64 %gep.upgrd.2
  ret i32* %P2
}
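
; Same fold as test1: (X >>s 2) * 4 is also X & -4, so ashr works too.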
define i32* @test3(i32* %P, i32 %X) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    andl $-4, %eax
; CHECK-NEXT:    addl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    retl
  %Y = ashr i32 %X, 2
  %P2 = getelementptr i32, i32* %P, i32 %Y
  ret i32* %P2
}
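
; The srl by 24 narrows the i32 load to a byte load at offset 3.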
define fastcc i32 @test4(i32* %d) {
; CHECK-LABEL: test4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movzbl 3(%ecx), %eax
; CHECK-NEXT:    retl
  %tmp4 = load i32, i32* %d
  %tmp512 = lshr i32 %tmp4, 24
  ret i32 %tmp512
}

; Ensure that we don't fold away shifts which have multiple uses, as they are
; just re-introduced for the second use.
define i64 @test5(i16 %i, i32* %arr) {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; CHECK-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    shrl $11, %eax
; CHECK-NEXT:    xorl %edx, %edx
; CHECK-NEXT:    addl (%ecx,%eax,4), %eax
; CHECK-NEXT:    setb %dl
; CHECK-NEXT:    retl
  %i.zext = zext i16 %i to i32
  %index = lshr i32 %i.zext, 11
  %index.zext = zext i32 %index to i64
  %val.ptr = getelementptr inbounds i32, i32* %arr, i64 %index.zext
  %val = load i32, i32* %val.ptr
  %val.zext = zext i32 %val to i64
  %sum = add i64 %val.zext, %index.zext
  ret i64 %sum
}

; We should not crash because an undef shift was created.
define i32 @overshift(i32 %a) {
; CHECK-LABEL: overshift:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retl
  %shr = lshr i32 %a, 33
  %xor = xor i32 1, %shr
  ret i32 %xor
}

; Should be possible to adjust the pointer and narrow the load to 16 bits.
define i16 @srl_load_narrowing1(i32* %arg) {
; CHECK-LABEL: srl_load_narrowing1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movzwl 1(%eax), %eax
; CHECK-NEXT:    retl
  %tmp1 = load i32, i32* %arg, align 1
  %tmp2 = lshr i32 %tmp1, 8
  %tmp3 = trunc i32 %tmp2 to i16
  ret i16 %tmp3
}
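
; Here too: the srl amount (24) is folded into the address, giving a byte
; load at offset 3, zero-extended to fill the i16.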
define i16 @srl_load_narrowing2(i32* %arg) {
; CHECK-LABEL: srl_load_narrowing2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    movzbl 3(%eax), %eax
; CHECK-NEXT:    # kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    retl
  %tmp1 = load i32, i32* %arg, align 1
  %tmp2 = lshr i32 %tmp1, 24
  %tmp3 = trunc i32 %tmp2 to i16
  ret i16 %tmp3
}