clang-p2996/llvm/test/CodeGen/AArch64/extract-bits.ll
David Green adec922361 [AArch64] Make -mcpu=generic schedule for an in-order core
We would like to start pushing -mcpu=generic towards enabling the set of
features that improves performance for some CPUs without hurting any
others: a blend of the performance options that is hopefully beneficial to
all CPUs. The largest part of that is enabling in-order scheduling using the
Cortex-A55 schedule model. This is similar to the Arm backend change
from eecb353d0e which made -mcpu=generic perform in-order scheduling
using the cortex-a8 schedule model.
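
For example, a default invocation with no explicit -mcpu, such as

  llc -mtriple=aarch64-unknown-linux-gnu test.ll -o -

now schedules with the Cortex-A55 model (test.ll here is just a
placeholder input; this only illustrates the new default, not a new flag).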

The idea is that in-order CPUs require the most help with instruction
scheduling, whereas out-of-order CPUs can, for the most part, schedule
around different codegen on their own. Our benchmarking suggests that this
hypothesis holds. When running on an in-order core, this improved
performance by 3.8% geomean on a set of DSP workloads, 2% geomean on
another set of embedded benchmarks, and between 1% and 1.8% on a set of
single-core and multi-core workloads, all running on a Cortex-A55 cluster.

On an out-of-order CPU the results are a lot more noisy, but show flat
performance or an improvement. On the set of DSP and embedded benchmarks,
run on a Cortex-A78, there was a very noisy 1% speed improvement. Using
the most detailed results I could find, SPEC2006 runs on a Neoverse N1
show a small increase in instruction count (+0.127%) but a decrease in
cycle count (-0.155% on average). The instruction count has very low
noise; the cycle count is noisier, so the 0.15% decrease is not
significant on its own. SPEC2k17 shows a small decrease in instruction
count (-0.2%) leading to a 0.296% decrease in cycle count. These results
are within noise margins but tend to show a small improvement in general.

When specifying an Apple target, clang will set "-target-cpu apple-a7"
on the command line, so Apple targets should not be affected by this
change when compiling through clang. This also doesn't enable more runtime
unrolling like -mcpu=cortex-a55 does; it only changes the schedule used.

A lot of existing tests have been updated. This is a summary of the important
differences:
 - Most changes are the same instructions in a different order.
 - Sometimes this leads to very minor inefficiencies, such as requiring
   an extra mov to move variables into x0/v0 for the return value of a test
   function.
 - misched-fusion.ll was no longer fusing the pairs of instructions it
   should, as per D110561. I've changed the schedule used in the test
   for now.
 - neon-mla-mls.ll now uses "mul; sub" as opposed to "neg; mla" due to
   the different latencies. This seems fine to me.
 - Some SVE tests do not always remove movprfx where they did before due
   to different register allocation giving different destructive forms.
 - The tests argument-blocks-array-of-struct.ll and arm64-windows-calls.ll
   produce two LDR where they previously produced an LDP due to
   store-pair-suppress kicking in.
 - arm64-ldp.ll and arm64-neon-copy.ll are missing pre/postinc on LDP.
 - Some tests such as arm64-neon-mul-div.ll and
   ragreedy-local-interval-cost.ll have more, less or just different
   spilling.
 - In aarch64_generated_funcs.ll.generated.expected one part of the
   function is no longer outlined. Interestingly, if I switch this to use
   any other schedule, even less is outlined.

Some of these are expected to happen, such as differences in outlining
or register spilling. There will be places where these result in worse
codegen and places where they are better, with the SPEC instruction counts
suggesting it is not a regression overall, on average.

Differential Revision: https://reviews.llvm.org/D110830
2021-10-09 15:58:31 +01:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-unknown-linux-gnu < %s | FileCheck %s
; *Please* keep in sync with test/CodeGen/X86/extract-bits.ll
; https://bugs.llvm.org/show_bug.cgi?id=36419
; https://bugs.llvm.org/show_bug.cgi?id=37603
; https://bugs.llvm.org/show_bug.cgi?id=37610
; Patterns:
; a) (x >> start) & (1 << nbits) - 1
; b) (x >> start) & ~(-1 << nbits)
; c) (x >> start) & (-1 >> (32 - y))
; d) (x >> start) << (32 - y) >> (32 - y)
; are equivalent.
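; As a concrete sanity check (illustrative values, not part of the original
; test): with x = 0xABCD1234, start = 8 and nbits = 12, all four forms
; compute (x >> 8) & 0xFFF = 0xD12.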
; ---------------------------------------------------------------------------- ;
; Pattern a. 32-bit
; ---------------------------------------------------------------------------- ;
define i32 @bextr32_a0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a0:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%shifted = lshr i32 %val, %numskipbits
%onebit = shl i32 1, %numlowbits
%mask = add nsw i32 %onebit, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_a0_arithmetic(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a0_arithmetic:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: asr w9, w0, w1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%shifted = ashr i32 %val, %numskipbits
%onebit = shl i32 1, %numlowbits
%mask = add nsw i32 %onebit, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_a1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%conv = zext i8 %numlowbits to i32
%onebit = shl i32 1, %conv
%mask = add nsw i32 %onebit, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_a2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: lsr w9, w9, w1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%val = load i32, i32* %w
%shifted = lshr i32 %val, %numskipbits
%onebit = shl i32 1, %numlowbits
%mask = add nsw i32 %onebit, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_a3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: lsr w9, w9, w1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%val = load i32, i32* %w
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%conv = zext i8 %numlowbits to i32
%onebit = shl i32 1, %conv
%mask = add nsw i32 %onebit, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_a4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_a4_commutative:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: and w0, w9, w8
; CHECK-NEXT: ret
%shifted = lshr i32 %val, %numskipbits
%onebit = shl i32 1, %numlowbits
%mask = add nsw i32 %onebit, -1
%masked = and i32 %shifted, %mask ; swapped order
ret i32 %masked
}
; 64-bit
define i64 @bextr64_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a0:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: and x0, x8, x9
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%onebit = shl i64 1, %numlowbits
%mask = add nsw i64 %onebit, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_a0_arithmetic(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a0_arithmetic:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: asr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: and x0, x8, x9
; CHECK-NEXT: ret
%shifted = ashr i64 %val, %numskipbits
%onebit = shl i64 1, %numlowbits
%mask = add nsw i64 %onebit, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_a1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: and x0, x8, x9
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%conv = zext i8 %numlowbits to i64
%onebit = shl i64 1, %conv
%mask = add nsw i64 %onebit, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_a2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: lsr x9, x9, x1
; CHECK-NEXT: and x0, x8, x9
; CHECK-NEXT: ret
%val = load i64, i64* %w
%shifted = lshr i64 %val, %numskipbits
%onebit = shl i64 1, %numlowbits
%mask = add nsw i64 %onebit, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_a3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: lsr x9, x9, x1
; CHECK-NEXT: and x0, x8, x9
; CHECK-NEXT: ret
%val = load i64, i64* %w
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%conv = zext i8 %numlowbits to i64
%onebit = shl i64 1, %conv
%mask = add nsw i64 %onebit, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_a4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_a4_commutative:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: and x0, x9, x8
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%onebit = shl i64 1, %numlowbits
%mask = add nsw i64 %onebit, -1
%masked = and i64 %shifted, %mask ; swapped order
ret i64 %masked
}
; 64-bit, but with 32-bit output
; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_a0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a0:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%onebit = shl i64 1, %numlowbits
%mask = add nsw i64 %onebit, -1
%masked = and i64 %mask, %shifted
%res = trunc i64 %masked to i32
ret i32 %res
}
; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_a1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a1:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%truncshifted = trunc i64 %shifted to i32
%onebit = shl i32 1, %numlowbits
%mask = add nsw i32 %onebit, -1
%masked = and i32 %mask, %truncshifted
ret i32 %masked
}
; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_a2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_a2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: sub w8, w8, #1
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%onebit = shl i32 1, %numlowbits
%mask = add nsw i32 %onebit, -1
%zextmask = zext i32 %mask to i64
%masked = and i64 %zextmask, %shifted
%truncmasked = trunc i64 %masked to i32
ret i32 %truncmasked
}
; ---------------------------------------------------------------------------- ;
; Pattern b. 32-bit
; ---------------------------------------------------------------------------- ;
define i32 @bextr32_b0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b0:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%shifted = lshr i32 %val, %numskipbits
%notmask = shl i32 -1, %numlowbits
%mask = xor i32 %notmask, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_b1_indexzext(i32 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%conv = zext i8 %numlowbits to i32
%notmask = shl i32 -1, %conv
%mask = xor i32 %notmask, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_b2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: lsr w9, w9, w1
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%val = load i32, i32* %w
%shifted = lshr i32 %val, %numskipbits
%notmask = shl i32 -1, %numlowbits
%mask = xor i32 %notmask, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_b3_load_indexzext(i32* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: lsr w9, w9, w1
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%val = load i32, i32* %w
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%conv = zext i8 %numlowbits to i32
%notmask = shl i32 -1, %conv
%mask = xor i32 %notmask, -1
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_b4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_b4_commutative:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%shifted = lshr i32 %val, %numskipbits
%notmask = shl i32 -1, %numlowbits
%mask = xor i32 %notmask, -1
%masked = and i32 %shifted, %mask ; swapped order
ret i32 %masked
}
; 64-bit
define i64 @bextr64_b0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b0:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, #-1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: bic x0, x9, x8
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%notmask = shl i64 -1, %numlowbits
%mask = xor i64 %notmask, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_b1_indexzext(i64 %val, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, #-1
; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: bic x0, x9, x8
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%conv = zext i8 %numlowbits to i64
%notmask = shl i64 -1, %conv
%mask = xor i64 %notmask, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_b2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov x8, #-1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: lsr x9, x9, x1
; CHECK-NEXT: bic x0, x9, x8
; CHECK-NEXT: ret
%val = load i64, i64* %w
%shifted = lshr i64 %val, %numskipbits
%notmask = shl i64 -1, %numlowbits
%mask = xor i64 %notmask, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_b3_load_indexzext(i64* %w, i8 zeroext %numskipbits, i8 zeroext %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov x8, #-1
; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: lsr x9, x9, x1
; CHECK-NEXT: bic x0, x9, x8
; CHECK-NEXT: ret
%val = load i64, i64* %w
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%conv = zext i8 %numlowbits to i64
%notmask = shl i64 -1, %conv
%mask = xor i64 %notmask, -1
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_b4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_b4_commutative:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, #-1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: bic x0, x9, x8
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%notmask = shl i64 -1, %numlowbits
%mask = xor i64 %notmask, -1
%masked = and i64 %shifted, %mask ; swapped order
ret i64 %masked
}
; 64-bit, but with 32-bit output
; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_b0(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b0:
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, #-1
; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x8, x8, x2
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%shiftedval = lshr i64 %val, %numskipbits
%widenumlowbits = zext i8 %numlowbits to i64
%notmask = shl nsw i64 -1, %widenumlowbits
%mask = xor i64 %notmask, -1
%wideres = and i64 %shiftedval, %mask
%res = trunc i64 %wideres to i32
ret i32 %res
}
; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_b1(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b1:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%shiftedval = lshr i64 %val, %numskipbits
%truncshiftedval = trunc i64 %shiftedval to i32
%widenumlowbits = zext i8 %numlowbits to i32
%notmask = shl nsw i32 -1, %widenumlowbits
%mask = xor i32 %notmask, -1
%res = and i32 %truncshiftedval, %mask
ret i32 %res
}
; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_b2(i64 %val, i64 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_b2:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: // kill: def $w2 killed $w2 def $x2
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl w8, w8, w2
; CHECK-NEXT: bic w0, w9, w8
; CHECK-NEXT: ret
%shiftedval = lshr i64 %val, %numskipbits
%widenumlowbits = zext i8 %numlowbits to i32
%notmask = shl nsw i32 -1, %widenumlowbits
%mask = xor i32 %notmask, -1
%zextmask = zext i32 %mask to i64
%wideres = and i64 %shiftedval, %zextmask
%res = trunc i64 %wideres to i32
ret i32 %res
}
; ---------------------------------------------------------------------------- ;
; Pattern c. 32-bit
; ---------------------------------------------------------------------------- ;
define i32 @bextr32_c0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c0:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w2
; CHECK-NEXT: mov w9, #-1
; CHECK-NEXT: lsr w10, w0, w1
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: and w0, w8, w10
; CHECK-NEXT: ret
%shifted = lshr i32 %val, %numskipbits
%numhighbits = sub i32 32, %numlowbits
%mask = lshr i32 -1, %numhighbits
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_c1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #32
; CHECK-NEXT: mov w9, #-1
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr w10, w0, w1
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: and w0, w8, w10
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%numhighbits = sub i8 32, %numlowbits
%sh_prom = zext i8 %numhighbits to i32
%mask = lshr i32 -1, %sh_prom
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_c2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w2
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov w10, #-1
; CHECK-NEXT: lsr w9, w9, w1
; CHECK-NEXT: lsr w8, w10, w8
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%val = load i32, i32* %w
%shifted = lshr i32 %val, %numskipbits
%numhighbits = sub i32 32, %numlowbits
%mask = lshr i32 -1, %numhighbits
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_c3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #32
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: mov w10, #-1
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr w9, w9, w1
; CHECK-NEXT: lsr w8, w10, w8
; CHECK-NEXT: and w0, w8, w9
; CHECK-NEXT: ret
%val = load i32, i32* %w
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%numhighbits = sub i8 32, %numlowbits
%sh_prom = zext i8 %numhighbits to i32
%mask = lshr i32 -1, %sh_prom
%masked = and i32 %mask, %shifted
ret i32 %masked
}
define i32 @bextr32_c4_commutative(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_c4_commutative:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w2
; CHECK-NEXT: mov w9, #-1
; CHECK-NEXT: lsr w10, w0, w1
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: and w0, w10, w8
; CHECK-NEXT: ret
%shifted = lshr i32 %val, %numskipbits
%numhighbits = sub i32 32, %numlowbits
%mask = lshr i32 -1, %numhighbits
%masked = and i32 %shifted, %mask ; swapped order
ret i32 %masked
}
; 64-bit
define i64 @bextr64_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c0:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x2
; CHECK-NEXT: mov x9, #-1
; CHECK-NEXT: lsr x10, x0, x1
; CHECK-NEXT: lsr x8, x9, x8
; CHECK-NEXT: and x0, x8, x10
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i64 64, %numlowbits
%mask = lshr i64 -1, %numhighbits
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_c1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #64
; CHECK-NEXT: mov x9, #-1
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr x10, x0, x1
; CHECK-NEXT: lsr x8, x9, x8
; CHECK-NEXT: and x0, x8, x10
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%numhighbits = sub i8 64, %numlowbits
%sh_prom = zext i8 %numhighbits to i64
%mask = lshr i64 -1, %sh_prom
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_c2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x2
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: mov x10, #-1
; CHECK-NEXT: lsr x9, x9, x1
; CHECK-NEXT: lsr x8, x10, x8
; CHECK-NEXT: and x0, x8, x9
; CHECK-NEXT: ret
%val = load i64, i64* %w
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i64 64, %numlowbits
%mask = lshr i64 -1, %numhighbits
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_c3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #64
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: mov x10, #-1
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr x9, x9, x1
; CHECK-NEXT: lsr x8, x10, x8
; CHECK-NEXT: and x0, x8, x9
; CHECK-NEXT: ret
%val = load i64, i64* %w
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%numhighbits = sub i8 64, %numlowbits
%sh_prom = zext i8 %numhighbits to i64
%mask = lshr i64 -1, %sh_prom
%masked = and i64 %mask, %shifted
ret i64 %masked
}
define i64 @bextr64_c4_commutative(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_c4_commutative:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x2
; CHECK-NEXT: mov x9, #-1
; CHECK-NEXT: lsr x10, x0, x1
; CHECK-NEXT: lsr x8, x9, x8
; CHECK-NEXT: and x0, x10, x8
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i64 64, %numlowbits
%mask = lshr i64 -1, %numhighbits
%masked = and i64 %shifted, %mask ; swapped order
ret i64 %masked
}
; 64-bit, but with 32-bit output
; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_c0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c0:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x2
; CHECK-NEXT: mov x9, #-1
; CHECK-NEXT: lsr x10, x0, x1
; CHECK-NEXT: lsr x8, x9, x8
; CHECK-NEXT: and w0, w8, w10
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i64 64, %numlowbits
%mask = lshr i64 -1, %numhighbits
%masked = and i64 %mask, %shifted
%res = trunc i64 %masked to i32
ret i32 %res
}
; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_c1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c1:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w2
; CHECK-NEXT: mov w9, #-1
; CHECK-NEXT: lsr x10, x0, x1
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: and w0, w8, w10
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%truncshifted = trunc i64 %shifted to i32
%numhighbits = sub i32 32, %numlowbits
%mask = lshr i32 -1, %numhighbits
%masked = and i32 %mask, %truncshifted
ret i32 %masked
}
; Shifting happens in 64-bit. Mask is 32-bit, but extended to 64-bit.
; Masking is 64-bit. Then truncation.
define i32 @bextr64_32_c2(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_c2:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w2
; CHECK-NEXT: mov w9, #-1
; CHECK-NEXT: lsr x10, x0, x1
; CHECK-NEXT: lsr w8, w9, w8
; CHECK-NEXT: and w0, w8, w10
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i32 32, %numlowbits
%mask = lshr i32 -1, %numhighbits
%zextmask = zext i32 %mask to i64
%masked = and i64 %zextmask, %shifted
%truncmasked = trunc i64 %masked to i32
ret i32 %truncmasked
}
; ---------------------------------------------------------------------------- ;
; Pattern d. 32-bit.
; ---------------------------------------------------------------------------- ;
define i32 @bextr32_d0(i32 %val, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d0:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w2
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: lsl w9, w9, w8
; CHECK-NEXT: lsr w0, w9, w8
; CHECK-NEXT: ret
%shifted = lshr i32 %val, %numskipbits
%numhighbits = sub i32 32, %numlowbits
%highbitscleared = shl i32 %shifted, %numhighbits
%masked = lshr i32 %highbitscleared, %numhighbits
ret i32 %masked
}
define i32 @bextr32_d1_indexzext(i32 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #32
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr w9, w0, w1
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: lsl w9, w9, w8
; CHECK-NEXT: lsr w0, w9, w8
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%numhighbits = sub i8 32, %numlowbits
%sh_prom = zext i8 %numhighbits to i32
%highbitscleared = shl i32 %shifted, %sh_prom
%masked = lshr i32 %highbitscleared, %sh_prom
ret i32 %masked
}
define i32 @bextr32_d2_load(i32* %w, i32 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w8, [x0]
; CHECK-NEXT: neg w9, w2
; CHECK-NEXT: lsr w8, w8, w1
; CHECK-NEXT: lsl w8, w8, w9
; CHECK-NEXT: lsr w0, w8, w9
; CHECK-NEXT: ret
%val = load i32, i32* %w
%shifted = lshr i32 %val, %numskipbits
%numhighbits = sub i32 32, %numlowbits
%highbitscleared = shl i32 %shifted, %numhighbits
%masked = lshr i32 %highbitscleared, %numhighbits
ret i32 %masked
}
define i32 @bextr32_d3_load_indexzext(i32* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr32_d3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #32
; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr w9, w9, w1
; CHECK-NEXT: lsl w9, w9, w8
; CHECK-NEXT: lsr w0, w9, w8
; CHECK-NEXT: ret
%val = load i32, i32* %w
%skip = zext i8 %numskipbits to i32
%shifted = lshr i32 %val, %skip
%numhighbits = sub i8 32, %numlowbits
%sh_prom = zext i8 %numhighbits to i32
%highbitscleared = shl i32 %shifted, %sh_prom
%masked = lshr i32 %highbitscleared, %sh_prom
ret i32 %masked
}
; 64-bit.
define i64 @bextr64_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d0:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x2
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x9, x9, x8
; CHECK-NEXT: lsr x0, x9, x8
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i64 64, %numlowbits
%highbitscleared = shl i64 %shifted, %numhighbits
%masked = lshr i64 %highbitscleared, %numhighbits
ret i64 %masked
}
define i64 @bextr64_d1_indexzext(i64 %val, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d1_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #64
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: lsl x9, x9, x8
; CHECK-NEXT: lsr x0, x9, x8
; CHECK-NEXT: ret
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%numhighbits = sub i8 64, %numlowbits
%sh_prom = zext i8 %numhighbits to i64
%highbitscleared = shl i64 %shifted, %sh_prom
%masked = lshr i64 %highbitscleared, %sh_prom
ret i64 %masked
}
define i64 @bextr64_d2_load(i64* %w, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d2_load:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x8, [x0]
; CHECK-NEXT: neg x9, x2
; CHECK-NEXT: lsr x8, x8, x1
; CHECK-NEXT: lsl x8, x8, x9
; CHECK-NEXT: lsr x0, x8, x9
; CHECK-NEXT: ret
%val = load i64, i64* %w
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i64 64, %numlowbits
%highbitscleared = shl i64 %shifted, %numhighbits
%masked = lshr i64 %highbitscleared, %numhighbits
ret i64 %masked
}
define i64 @bextr64_d3_load_indexzext(i64* %w, i8 %numskipbits, i8 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_d3_load_indexzext:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #64
; CHECK-NEXT: ldr x9, [x0]
; CHECK-NEXT: sub w8, w8, w2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: lsr x9, x9, x1
; CHECK-NEXT: lsl x9, x9, x8
; CHECK-NEXT: lsr x0, x9, x8
; CHECK-NEXT: ret
%val = load i64, i64* %w
%skip = zext i8 %numskipbits to i64
%shifted = lshr i64 %val, %skip
%numhighbits = sub i8 64, %numlowbits
%sh_prom = zext i8 %numhighbits to i64
%highbitscleared = shl i64 %shifted, %sh_prom
%masked = lshr i64 %highbitscleared, %sh_prom
ret i64 %masked
}
; 64-bit, but with 32-bit output
; Everything done in 64-bit, truncation happens last.
define i32 @bextr64_32_d0(i64 %val, i64 %numskipbits, i64 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_d0:
; CHECK: // %bb.0:
; CHECK-NEXT: neg x8, x2
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl x9, x9, x8
; CHECK-NEXT: lsr x0, x9, x8
; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%numhighbits = sub i64 64, %numlowbits
%highbitscleared = shl i64 %shifted, %numhighbits
%masked = lshr i64 %highbitscleared, %numhighbits
%res = trunc i64 %masked to i32
ret i32 %res
}
; Shifting happens in 64-bit, then truncation. Masking is 32-bit.
define i32 @bextr64_32_d1(i64 %val, i64 %numskipbits, i32 %numlowbits) nounwind {
; CHECK-LABEL: bextr64_32_d1:
; CHECK: // %bb.0:
; CHECK-NEXT: neg w8, w2
; CHECK-NEXT: lsr x9, x0, x1
; CHECK-NEXT: lsl w9, w9, w8
; CHECK-NEXT: lsr w0, w9, w8
; CHECK-NEXT: ret
%shifted = lshr i64 %val, %numskipbits
%truncshifted = trunc i64 %shifted to i32
%numhighbits = sub i32 32, %numlowbits
%highbitscleared = shl i32 %truncshifted, %numhighbits
%masked = lshr i32 %highbitscleared, %numhighbits
ret i32 %masked
}
; ---------------------------------------------------------------------------- ;
; Constant
; ---------------------------------------------------------------------------- ;
; https://bugs.llvm.org/show_bug.cgi?id=38938
define void @pr38938(i32* %a0, i64* %a1) nounwind {
; CHECK-LABEL: pr38938:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr x8, [x1]
; CHECK-NEXT: ubfx x8, x8, #21, #10
; CHECK-NEXT: lsl x8, x8, #2
; CHECK-NEXT: ldr w9, [x0, x8]
; CHECK-NEXT: add w9, w9, #1
; CHECK-NEXT: str w9, [x0, x8]
; CHECK-NEXT: ret
%tmp = load i64, i64* %a1, align 8
%tmp1 = lshr i64 %tmp, 21
%tmp2 = and i64 %tmp1, 1023
%tmp3 = getelementptr inbounds i32, i32* %a0, i64 %tmp2
%tmp4 = load i32, i32* %tmp3, align 4
%tmp5 = add nsw i32 %tmp4, 1
store i32 %tmp5, i32* %tmp3, align 4
ret void
}
; The most canonical variant
define i32 @c0_i32(i32 %arg) nounwind {
; CHECK-LABEL: c0_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx w0, w0, #19, #10
; CHECK-NEXT: ret
%tmp0 = lshr i32 %arg, 19
%tmp1 = and i32 %tmp0, 1023
ret i32 %tmp1
}
; Should be still fine, but the mask is shifted
define i32 @c1_i32(i32 %arg) nounwind {
; CHECK-LABEL: c1_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr w8, w0, #19
; CHECK-NEXT: and w0, w8, #0xffc
; CHECK-NEXT: ret
%tmp0 = lshr i32 %arg, 19
%tmp1 = and i32 %tmp0, 4092
ret i32 %tmp1
}
; Should be still fine, but the result is shifted left afterwards
define i32 @c2_i32(i32 %arg) nounwind {
; CHECK-LABEL: c2_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx w8, w0, #19, #10
; CHECK-NEXT: lsl w0, w8, #2
; CHECK-NEXT: ret
%tmp0 = lshr i32 %arg, 19
%tmp1 = and i32 %tmp0, 1023
%tmp2 = shl i32 %tmp1, 2
ret i32 %tmp2
}
; The mask covers newly shifted-in bit
define i32 @c4_i32_bad(i32 %arg) nounwind {
; CHECK-LABEL: c4_i32_bad:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr w8, w0, #19
; CHECK-NEXT: and w0, w8, #0x1ffe
; CHECK-NEXT: ret
%tmp0 = lshr i32 %arg, 19
%tmp1 = and i32 %tmp0, 16382
ret i32 %tmp1
}
; i64
; The most canonical variant
define i64 @c0_i64(i64 %arg) nounwind {
; CHECK-LABEL: c0_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx x0, x0, #51, #10
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
ret i64 %tmp1
}
; Should be still fine, but the mask is shifted
define i64 @c1_i64(i64 %arg) nounwind {
; CHECK-LABEL: c1_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr x8, x0, #51
; CHECK-NEXT: and x0, x8, #0xffc
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 4092
ret i64 %tmp1
}
; Should be still fine, but the result is shifted left afterwards
define i64 @c2_i64(i64 %arg) nounwind {
; CHECK-LABEL: c2_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx x8, x0, #51, #10
; CHECK-NEXT: lsl x0, x8, #2
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
%tmp2 = shl i64 %tmp1, 2
ret i64 %tmp2
}
; The mask covers newly shifted-in bit
define i64 @c4_i64_bad(i64 %arg) nounwind {
; CHECK-LABEL: c4_i64_bad:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr x8, x0, #51
; CHECK-NEXT: and x0, x8, #0x1ffe
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 16382
ret i64 %tmp1
}
; ---------------------------------------------------------------------------- ;
; Constant, storing the result afterwards.
; ---------------------------------------------------------------------------- ;
; i32
; The most canonical variant
define void @c5_i32(i32 %arg, i32* %ptr) nounwind {
; CHECK-LABEL: c5_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx w8, w0, #19, #10
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%tmp0 = lshr i32 %arg, 19
%tmp1 = and i32 %tmp0, 1023
store i32 %tmp1, i32* %ptr
ret void
}
; Should be still fine, but the mask is shifted
define void @c6_i32(i32 %arg, i32* %ptr) nounwind {
; CHECK-LABEL: c6_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx w8, w0, #19, #12
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%tmp0 = lshr i32 %arg, 19
%tmp1 = and i32 %tmp0, 4095
store i32 %tmp1, i32* %ptr
ret void
}
; Should be still fine, but the result is shifted left afterwards
define void @c7_i32(i32 %arg, i32* %ptr) nounwind {
; CHECK-LABEL: c7_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx w8, w0, #19, #10
; CHECK-NEXT: lsl w8, w8, #2
; CHECK-NEXT: str w8, [x1]
; CHECK-NEXT: ret
%tmp0 = lshr i32 %arg, 19
%tmp1 = and i32 %tmp0, 1023
%tmp2 = shl i32 %tmp1, 2
store i32 %tmp2, i32* %ptr
ret void
}
; i64
; The most canonical variant
define void @c5_i64(i64 %arg, i64* %ptr) nounwind {
; CHECK-LABEL: c5_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx x8, x0, #51, #10
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
store i64 %tmp1, i64* %ptr
ret void
}
; Should be still fine, but the mask is shifted
define void @c6_i64(i64 %arg, i64* %ptr) nounwind {
; CHECK-LABEL: c6_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx x8, x0, #51, #12
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 4095
store i64 %tmp1, i64* %ptr
ret void
}
; Should be still fine, but the result is shifted left afterwards
define void @c7_i64(i64 %arg, i64* %ptr) nounwind {
; CHECK-LABEL: c7_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: ubfx x8, x0, #51, #10
; CHECK-NEXT: lsl x8, x8, #2
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
%tmp0 = lshr i64 %arg, 51
%tmp1 = and i64 %tmp0, 1023
%tmp2 = shl i64 %tmp1, 2
store i64 %tmp2, i64* %ptr
ret void
}