clang-p2996/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
Commit 7b0c41841e by Craig Topper: [RISCV] Move compressible registers to the beginning of the FP allocation order.
We don't have very many compressible FP instructions, just load and store.
These instructions require the FP register to be one of f8-f15.

This patch changes the FP allocation order to prioritize f10-f15. These are
also the FP argument registers, so I allocated them in reverse order, starting
at f15, to avoid taking the first argument registers. This appears to match
GCC's allocation order.

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D146488
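
The new order shows up throughout the checks below: scratch FP values land in
fa5/fa4 (f15/f14) rather than ft0/ft1. As a rough sketch of why the f8-f15
range matters (a hypothetical function, not part of this test; the commented
assembly is an assumption about what llc would emit with -mattr=+c,+d): the
compressed c.fld/c.fsd encodings have 3-bit register fields and can only name
f8-f15, so temporaries allocated from that range keep their memory accesses
compressible.

define double @sum2(ptr %p) nounwind {
  %q = getelementptr double, ptr %p, i64 1
  %a = load double, ptr %p    ; e.g. fld fa5, 0(a0) - fa5 is f15, compressible
  %b = load double, ptr %q    ; e.g. fld fa4, 8(a0) - fa4 is f14, compressible
  %s = fadd double %a, %b     ; e.g. fadd.d fa0, fa5, fa4
  ret double %s
}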

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; Selects of wide values are split into two selects, which can easily cause
; unnecessary control flow. Here we check some cases where we can currently
; emit a sequence of selects with shared control flow.
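; For example, in cmovcc64 below the i64 select is split on RV32 into two i32
; selects on the same condition; sharing the control flow yields a single beq
; guarding two conditional moves rather than two separate branch diamonds.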
define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmovcc64:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: li a5, 123
; RV32I-NEXT: beq a0, a5, .LBB0_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: .LBB0_2: # %entry
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovcc64:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: li a3, 123
; RV64I-NEXT: beq a0, a3, .LBB0_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: .LBB0_2: # %entry
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 123
%cond = select i1 %cmp, i64 %b, i64 %c
ret i64 %cond
}
define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmovcc128:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: xori a1, a1, 123
; RV32I-NEXT: or a1, a1, a2
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: beqz a1, .LBB1_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: .LBB1_2: # %entry
; RV32I-NEXT: beqz a1, .LBB1_5
; RV32I-NEXT: # %bb.3: # %entry
; RV32I-NEXT: addi a5, a4, 4
; RV32I-NEXT: bnez a1, .LBB1_6
; RV32I-NEXT: .LBB1_4:
; RV32I-NEXT: addi a6, a3, 8
; RV32I-NEXT: j .LBB1_7
; RV32I-NEXT: .LBB1_5:
; RV32I-NEXT: addi a5, a3, 4
; RV32I-NEXT: beqz a1, .LBB1_4
; RV32I-NEXT: .LBB1_6: # %entry
; RV32I-NEXT: addi a6, a4, 8
; RV32I-NEXT: .LBB1_7: # %entry
; RV32I-NEXT: lw a2, 0(a2)
; RV32I-NEXT: lw a5, 0(a5)
; RV32I-NEXT: lw a6, 0(a6)
; RV32I-NEXT: beqz a1, .LBB1_9
; RV32I-NEXT: # %bb.8: # %entry
; RV32I-NEXT: addi a3, a4, 12
; RV32I-NEXT: j .LBB1_10
; RV32I-NEXT: .LBB1_9:
; RV32I-NEXT: addi a3, a3, 12
; RV32I-NEXT: .LBB1_10: # %entry
; RV32I-NEXT: lw a1, 0(a3)
; RV32I-NEXT: sw a1, 12(a0)
; RV32I-NEXT: sw a6, 8(a0)
; RV32I-NEXT: sw a5, 4(a0)
; RV32I-NEXT: sw a2, 0(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovcc128:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: li a5, 123
; RV64I-NEXT: beq a0, a5, .LBB1_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a1, a3
; RV64I-NEXT: mv a2, a4
; RV64I-NEXT: .LBB1_2: # %entry
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: ret
entry:
%cmp = icmp eq i64 %a, 123
%cond = select i1 %cmp, i128 %b, i128 %c
ret i128 %cond
}
define i64 @cmov64(i1 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmov64:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a5, a0, 1
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: bnez a5, .LBB2_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: .LBB2_2: # %entry
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmov64:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a3, a0, 1
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: bnez a3, .LBB2_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB2_2: # %entry
; RV64I-NEXT: ret
entry:
%cond = select i1 %a, i64 %b, i64 %c
ret i64 %cond
}
define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
; RV32I-LABEL: cmov128:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a1, a1, 1
; RV32I-NEXT: mv a4, a2
; RV32I-NEXT: bnez a1, .LBB3_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a4, a3
; RV32I-NEXT: .LBB3_2: # %entry
; RV32I-NEXT: bnez a1, .LBB3_5
; RV32I-NEXT: # %bb.3: # %entry
; RV32I-NEXT: addi a5, a3, 4
; RV32I-NEXT: beqz a1, .LBB3_6
; RV32I-NEXT: .LBB3_4:
; RV32I-NEXT: addi a6, a2, 8
; RV32I-NEXT: j .LBB3_7
; RV32I-NEXT: .LBB3_5:
; RV32I-NEXT: addi a5, a2, 4
; RV32I-NEXT: bnez a1, .LBB3_4
; RV32I-NEXT: .LBB3_6: # %entry
; RV32I-NEXT: addi a6, a3, 8
; RV32I-NEXT: .LBB3_7: # %entry
; RV32I-NEXT: lw a4, 0(a4)
; RV32I-NEXT: lw a5, 0(a5)
; RV32I-NEXT: lw a6, 0(a6)
; RV32I-NEXT: bnez a1, .LBB3_9
; RV32I-NEXT: # %bb.8: # %entry
; RV32I-NEXT: addi a2, a3, 12
; RV32I-NEXT: j .LBB3_10
; RV32I-NEXT: .LBB3_9:
; RV32I-NEXT: addi a2, a2, 12
; RV32I-NEXT: .LBB3_10: # %entry
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: sw a1, 12(a0)
; RV32I-NEXT: sw a6, 8(a0)
; RV32I-NEXT: sw a5, 4(a0)
; RV32I-NEXT: sw a4, 0(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmov128:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a5, a0, 1
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: bnez a5, .LBB3_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: mv a2, a4
; RV64I-NEXT: .LBB3_2: # %entry
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: ret
entry:
%cond = select i1 %a, i128 %b, i128 %c
ret i128 %cond
}
define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind {
; RV32I-LABEL: cmovfloat:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: bnez a0, .LBB4_2
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: fmv.w.x fa5, a4
; RV32I-NEXT: fmv.w.x fa4, a2
; RV32I-NEXT: j .LBB4_3
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: fmv.w.x fa5, a3
; RV32I-NEXT: fmv.w.x fa4, a1
; RV32I-NEXT: .LBB4_3: # %entry
; RV32I-NEXT: fadd.s fa5, fa4, fa5
; RV32I-NEXT: fmv.x.w a0, fa5
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovfloat:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: bnez a0, .LBB4_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: fmv.w.x fa5, a4
; RV64I-NEXT: fmv.w.x fa4, a2
; RV64I-NEXT: j .LBB4_3
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: fmv.w.x fa5, a3
; RV64I-NEXT: fmv.w.x fa4, a1
; RV64I-NEXT: .LBB4_3: # %entry
; RV64I-NEXT: fadd.s fa5, fa4, fa5
; RV64I-NEXT: fmv.x.w a0, fa5
; RV64I-NEXT: ret
entry:
%cond1 = select i1 %a, float %b, float %c
%cond2 = select i1 %a, float %d, float %e
%ret = fadd float %cond1, %cond2
ret float %ret
}
define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
; RV32I-LABEL: cmovdouble:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw a3, 8(sp)
; RV32I-NEXT: sw a4, 12(sp)
; RV32I-NEXT: fld fa5, 8(sp)
; RV32I-NEXT: sw a1, 8(sp)
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: sw a2, 12(sp)
; RV32I-NEXT: beqz a0, .LBB5_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: fld fa5, 8(sp)
; RV32I-NEXT: .LBB5_2: # %entry
; RV32I-NEXT: fsd fa5, 8(sp)
; RV32I-NEXT: lw a0, 8(sp)
; RV32I-NEXT: lw a1, 12(sp)
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovdouble:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: bnez a0, .LBB5_2
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: fmv.d.x fa5, a2
; RV64I-NEXT: fmv.x.d a0, fa5
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: fmv.d.x fa5, a1
; RV64I-NEXT: fmv.x.d a0, fa5
; RV64I-NEXT: ret
entry:
%cond = select i1 %a, double %b, double %c
ret double %cond
}
; Check that selects with dependencies on previous ones aren't incorrectly
; optimized.
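; In cmovccdep below, %cond2 takes %cond1 as its true operand, so the two
; selects cannot be collapsed into a single branch diamond; the output keeps
; two branches on the same li a4, 123 comparison.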
define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmovccdep:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: li a4, 123
; RV32I-NEXT: bne a0, a4, .LBB6_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: bne a0, a4, .LBB6_4
; RV32I-NEXT: .LBB6_2: # %entry
; RV32I-NEXT: add a0, a1, a2
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB6_3: # %entry
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: beq a0, a4, .LBB6_2
; RV32I-NEXT: .LBB6_4: # %entry
; RV32I-NEXT: add a0, a1, a3
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovccdep:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: li a4, 123
; RV64I-NEXT: bne a0, a4, .LBB6_3
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: bne a0, a4, .LBB6_4
; RV64I-NEXT: .LBB6_2: # %entry
; RV64I-NEXT: addw a0, a1, a2
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB6_3: # %entry
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: beq a0, a4, .LBB6_2
; RV64I-NEXT: .LBB6_4: # %entry
; RV64I-NEXT: addw a0, a1, a3
; RV64I-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 123
%cond1 = select i1 %cmp, i32 %b, i32 %c
%cond2 = select i1 %cmp, i32 %cond1, i32 %d
%ret = add i32 %cond1, %cond2
ret i32 %ret
}
; Check that selects with different conditions aren't incorrectly optimized.
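; In cmovdiffcc below, the selects branch on %a and %b respectively, so their
; control flow cannot be shared; each condition keeps its own branch
; (beqz a0 and beqz a1).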
define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
; RV32I-LABEL: cmovdiffcc:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a0, a0, 1
; RV32I-NEXT: andi a1, a1, 1
; RV32I-NEXT: beqz a0, .LBB7_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: beqz a1, .LBB7_4
; RV32I-NEXT: .LBB7_2: # %entry
; RV32I-NEXT: add a0, a2, a4
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB7_3: # %entry
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: bnez a1, .LBB7_2
; RV32I-NEXT: .LBB7_4: # %entry
; RV32I-NEXT: add a0, a2, a5
; RV32I-NEXT: ret
;
; RV64I-LABEL: cmovdiffcc:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
; RV64I-NEXT: andi a1, a1, 1
; RV64I-NEXT: beqz a0, .LBB7_3
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: beqz a1, .LBB7_4
; RV64I-NEXT: .LBB7_2: # %entry
; RV64I-NEXT: addw a0, a2, a4
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB7_3: # %entry
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: bnez a1, .LBB7_2
; RV64I-NEXT: .LBB7_4: # %entry
; RV64I-NEXT: addw a0, a2, a5
; RV64I-NEXT: ret
entry:
%cond1 = select i1 %a, i32 %c, i32 %d
%cond2 = select i1 %b, i32 %e, i32 %f
%ret = add i32 %cond1, %cond2
ret i32 %ret
}
define float @CascadedSelect(float noundef %a) {
; RV32I-LABEL: CascadedSelect:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: fmv.w.x fa5, a0
; RV32I-NEXT: lui a0, 260096
; RV32I-NEXT: fmv.w.x fa4, a0
; RV32I-NEXT: flt.s a0, fa4, fa5
; RV32I-NEXT: bnez a0, .LBB8_3
; RV32I-NEXT: # %bb.1: # %entry
; RV32I-NEXT: fmv.w.x fa4, zero
; RV32I-NEXT: flt.s a0, fa5, fa4
; RV32I-NEXT: bnez a0, .LBB8_3
; RV32I-NEXT: # %bb.2: # %entry
; RV32I-NEXT: fmv.s fa4, fa5
; RV32I-NEXT: .LBB8_3: # %entry
; RV32I-NEXT: fmv.x.w a0, fa4
; RV32I-NEXT: ret
;
; RV64I-LABEL: CascadedSelect:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: fmv.w.x fa5, a0
; RV64I-NEXT: lui a0, 260096
; RV64I-NEXT: fmv.w.x fa4, a0
; RV64I-NEXT: flt.s a0, fa4, fa5
; RV64I-NEXT: bnez a0, .LBB8_3
; RV64I-NEXT: # %bb.1: # %entry
; RV64I-NEXT: fmv.w.x fa4, zero
; RV64I-NEXT: flt.s a0, fa5, fa4
; RV64I-NEXT: bnez a0, .LBB8_3
; RV64I-NEXT: # %bb.2: # %entry
; RV64I-NEXT: fmv.s fa4, fa5
; RV64I-NEXT: .LBB8_3: # %entry
; RV64I-NEXT: fmv.x.w a0, fa4
; RV64I-NEXT: ret
entry:
%cmp = fcmp ogt float %a, 1.000000e+00
%cmp1 = fcmp olt float %a, 0.000000e+00
%.a = select i1 %cmp1, float 0.000000e+00, float %a
%retval.0 = select i1 %cmp, float 1.000000e+00, float %.a
ret float %retval.0
}