This demonstrates a possible fix for PR48760 - for compares with constants, canonicalize the SGT/UGT condition code to use SGE/UGE, which should reduce the number of EFLAGS bits we need to read. As discussed on PR48760, some EFLAGS bits are treated independently, which can require additional uops to merge together for certain CMOVcc/SETcc/etc. modes. I've limited this to cases where the constant increment doesn't result in a larger encoding or additional i64 constant materializations. Differential Revision: https://reviews.llvm.org/D101074
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s

; rdar://7329206

; (x ugt 26) is emitted as an unsigned-GE compare against 27 (setae),
; per the canonicalization described in the file header.
define zeroext i16 @t1(i16 zeroext %x) nounwind readnone ssp {
; CHECK-LABEL: t1:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    cmpw $27, %di
; CHECK-NEXT:    setae %al
; CHECK-NEXT:    shll $5, %eax
; CHECK-NEXT:    retq
  %t0 = icmp ugt i16 %x, 26
  %if = select i1 %t0, i16 32, i16 0
  ret i16 %if
}
; (x ult 26) already uses a single carry-flag read (setb); no rewrite needed.
define zeroext i16 @t2(i16 zeroext %x) nounwind readnone ssp {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    cmpw $26, %di
; CHECK-NEXT:    setb %al
; CHECK-NEXT:    shll $5, %eax
; CHECK-NEXT:    retq
  %t0 = icmp ult i16 %x, 26
  %if = select i1 %t0, i16 32, i16 0
  ret i16 %if
}
; i64 variant of the ult pattern: setb feeds a 64-bit shift.
define i64 @t3(i64 %x) nounwind readnone ssp {
; CHECK-LABEL: t3:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    cmpq $18, %rdi
; CHECK-NEXT:    setb %al
; CHECK-NEXT:    shlq $6, %rax
; CHECK-NEXT:    retq
  %t0 = icmp ult i64 %x, 18
  %if = select i1 %t0, i64 64, i64 0
  ret i64 %if
}
; Global loaded by @t4's compare/adc sequence.
@v4 = common global i32 0, align 4
; Zero-test of a global folded into cmp $1 + adc so the carry flag supplies
; the +1 increment directly.
define i32 @t4(i32 %a) {
; CHECK-LABEL: t4:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movq _v4@GOTPCREL(%rip), %rax
; CHECK-NEXT:    cmpl $1, (%rax)
; CHECK-NEXT:    movw $1, %ax
; CHECK-NEXT:    adcw $0, %ax
; CHECK-NEXT:    shll $16, %eax
; CHECK-NEXT:    retq
  %t0 = load i32, i32* @v4, align 4
  %not.tobool = icmp eq i32 %t0, 0
  %conv.i = sext i1 %not.tobool to i16
  %call.lobit = lshr i16 %conv.i, 15
  %add.i.1 = add nuw nsw i16 %call.lobit, 1
  %conv4.2 = zext i16 %add.i.1 to i32
  %add = shl nuw nsw i32 %conv4.2, 16
  ret i32 %add
}
; Inverted sign-bit extraction collapses to test + setns (i8 result).
define i8 @t5(i32 %a) #0 {
; CHECK-LABEL: t5:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    setns %al
; CHECK-NEXT:    retq
  %.lobit = lshr i32 %a, 31
  %trunc = trunc i32 %.lobit to i8
  %.not = xor i8 %trunc, 1
  ret i8 %.not
}
; Same as @t5 but with an i1 result: still test + setns.
define zeroext i1 @t6(i32 %a) #0 {
; CHECK-LABEL: t6:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    testl %edi, %edi
; CHECK-NEXT:    setns %al
; CHECK-NEXT:    retq
  %.lobit = lshr i32 %a, 31
  %trunc = trunc i32 %.lobit to i1
  %.not = xor i1 %trunc, 1
  ret i1 %.not
}
; Single-bit test (and 1024 + ne 0) lowers to shift+and rather than a
; flag-setting compare.
define i16 @shift_and(i16 %a) {
; CHECK-LABEL: shift_and:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movl %edi, %eax
; CHECK-NEXT:    shrl $10, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT:    retq
  %and = and i16 %a, 1024
  %cmp = icmp ne i16 %and, 0
  %conv = zext i1 %cmp to i16
  ret i16 %conv
}
; @t5/@t6 pin a CPU so the lowering under test is deterministic.
attributes #0 = { "target-cpu"="skylake-avx512" }