This change introduces a default scheduling model for the RISCV target which leaves everything unchanged except the MicroOpBufferSize. The default value of this flag in NoSched is 0. Both configurations represent in-order cores (i.e. no reorder window); the difference between them comes down to whether heuristics other than latency are allowed to apply (implementation details below). I left the processor models which explicitly set MicroOpBufferSize=0 unchanged in this patch, but strongly suspect we should change those too. Honestly, I think the LLVM-wide default for this flag should be changed, but I don't have the energy to manage the updates for all targets.

Implementation-wise, the effect of this change is that scheduling units which are ready to run *except that* one of their predecessors may not have completed yet are added to the Available list rather than the Pending one. As a result, it becomes possible to choose to schedule a node before its ready cycle if the heuristics prefer; this essentially chooses to insert a resource stall instead of, e.g., increasing register pressure. Note that I was initially concerned there might be a correctness aspect (as in some kind of exposed-pipeline design), but the generic scheduler doesn't seem to know how to insert noop instructions; without that ability, a program wasn't guaranteed to schedule correctly on an exposed pipeline anyway, depending on the program and scheduling model in question.

In codegen results, the effect is that we sometimes prioritize register pressure over latency. This is mostly churn (or small wins) on scalar code because we have many more registers, but it is of major importance on vector code, particularly at high LMUL, because we effectively have many fewer registers and the relative cost of spilling is much higher. This is a significant improvement in high-LMUL code quality for default rva23u configurations, or any vector configuration without -mcpu for that matter.

Fixes #107532
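For readers unfamiliar with the mechanism, here is a minimal self-contained sketch of the Pending/Available decision described above. This is a toy model of the behavior, not code from the patch or from MachineScheduler.cpp; the names (releaseNode, ReadyCycle, CurrCycle) merely mirror the generic scheduler's vocabulary.

```cpp
#include <cstdio>

// Toy model (a sketch, not LLVM code) of how the generic scheduler's
// boundary decides where a just-released node goes. With
// MicroOpBufferSize == 0, a node whose ReadyCycle is still in the future
// waits on Pending, so latency is the only input. With a non-zero buffer
// it lands on Available, where heuristics such as register pressure may
// pick it early, trading a resource stall for fewer spills.
struct SchedModel {
  unsigned MicroOpBufferSize;
};

enum class Queue { Pending, Available };

Queue releaseNode(const SchedModel &M, unsigned ReadyCycle,
                  unsigned CurrCycle) {
  bool Buffered = M.MicroOpBufferSize != 0;
  if (!Buffered && ReadyCycle > CurrCycle)
    return Queue::Pending;   // stall-sensitive: wait out the latency
  return Queue::Available;   // heuristics may schedule before ReadyCycle
}

int main() {
  SchedModel NoSchedLike{0}, NewDefault{1};
  // A node whose predecessor completes at cycle 5; scheduler is at cycle 3.
  printf("MicroOpBufferSize=0 -> %s\n",
         releaseNode(NoSchedLike, 5, 3) == Queue::Pending ? "Pending"
                                                          : "Available");
  printf("MicroOpBufferSize=1 -> %s\n",
         releaseNode(NewDefault, 5, 3) == Queue::Pending ? "Pending"
                                                         : "Available");
}
```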
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -disable-block-placement -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV32I %s
; RUN: llc -mtriple=riscv64 -disable-block-placement -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefixes=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+xmipscmove -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I-CCMOV %s

define signext i32 @foo(i32 signext %a, ptr %b) nounwind {
; RV32I-LABEL: foo:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: beq a0, a2, .LBB0_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bne a0, a2, .LBB0_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_4:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bltu a2, a0, .LBB0_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_6:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bgeu a0, a2, .LBB0_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_8:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bltu a0, a2, .LBB0_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_10:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bgeu a2, a0, .LBB0_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_12:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blt a2, a0, .LBB0_14
; RV32I-NEXT: # %bb.13:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_14:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bge a0, a2, .LBB0_16
; RV32I-NEXT: # %bb.15:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_16:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blt a0, a2, .LBB0_18
; RV32I-NEXT: # %bb.17:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_18:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bge a2, a0, .LBB0_20
; RV32I-NEXT: # %bb.19:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_20:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blez a2, .LBB0_22
; RV32I-NEXT: # %bb.21:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_22:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: bgez a2, .LBB0_24
; RV32I-NEXT: # %bb.23:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB0_24:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: li a4, 1024
; RV32I-NEXT: blt a4, a3, .LBB0_26
; RV32I-NEXT: # %bb.25:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB0_26:
; RV32I-NEXT: lw a1, 0(a1)
; RV32I-NEXT: li a3, 2046
; RV32I-NEXT: bltu a3, a2, .LBB0_28
; RV32I-NEXT: # %bb.27:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: .LBB0_28:
; RV32I-NEXT: ret
;
; RV64I-LABEL: foo:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: beq a0, a2, .LBB0_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bne a0, a2, .LBB0_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_4:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bltu a2, a0, .LBB0_6
; RV64I-NEXT: # %bb.5:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_6:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bgeu a0, a2, .LBB0_8
; RV64I-NEXT: # %bb.7:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_8:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bltu a0, a2, .LBB0_10
; RV64I-NEXT: # %bb.9:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_10:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bgeu a2, a0, .LBB0_12
; RV64I-NEXT: # %bb.11:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_12:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: blt a2, a0, .LBB0_14
; RV64I-NEXT: # %bb.13:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_14:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bge a0, a2, .LBB0_16
; RV64I-NEXT: # %bb.15:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_16:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: blt a0, a2, .LBB0_18
; RV64I-NEXT: # %bb.17:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_18:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bge a2, a0, .LBB0_20
; RV64I-NEXT: # %bb.19:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_20:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: blez a2, .LBB0_22
; RV64I-NEXT: # %bb.21:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_22:
; RV64I-NEXT: lw a3, 0(a1)
; RV64I-NEXT: bgez a2, .LBB0_24
; RV64I-NEXT: # %bb.23:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB0_24:
; RV64I-NEXT: lw a3, 0(a1)
; RV64I-NEXT: li a4, 1024
; RV64I-NEXT: blt a4, a3, .LBB0_26
; RV64I-NEXT: # %bb.25:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB0_26:
; RV64I-NEXT: lw a1, 0(a1)
; RV64I-NEXT: li a3, 2046
; RV64I-NEXT: bltu a3, a2, .LBB0_28
; RV64I-NEXT: # %bb.27:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB0_28:
; RV64I-NEXT: ret
;
; RV64I-CCMOV-LABEL: foo:
; RV64I-CCMOV: # %bb.0:
; RV64I-CCMOV-NEXT: lw a2, 0(a1)
; RV64I-CCMOV-NEXT: lw a3, 0(a1)
; RV64I-CCMOV-NEXT: xor a4, a0, a2
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a2, a0
; RV64I-CCMOV-NEXT: lw a2, 0(a1)
; RV64I-CCMOV-NEXT: xor a4, a0, a3
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a0, a3
; RV64I-CCMOV-NEXT: lw a3, 0(a1)
; RV64I-CCMOV-NEXT: sltu a4, a2, a0
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a0, a2
; RV64I-CCMOV-NEXT: lw a2, 0(a1)
; RV64I-CCMOV-NEXT: sltu a4, a0, a3
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a3, a0
; RV64I-CCMOV-NEXT: lw a3, 0(a1)
; RV64I-CCMOV-NEXT: sltu a4, a0, a2
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a0, a2
; RV64I-CCMOV-NEXT: lw a2, 0(a1)
; RV64I-CCMOV-NEXT: sltu a4, a3, a0
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a3, a0
; RV64I-CCMOV-NEXT: lw a3, 0(a1)
; RV64I-CCMOV-NEXT: sext.w a4, a0
; RV64I-CCMOV-NEXT: slt a4, a2, a4
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a0, a2
; RV64I-CCMOV-NEXT: lw a2, 0(a1)
; RV64I-CCMOV-NEXT: sext.w a4, a0
; RV64I-CCMOV-NEXT: slt a4, a4, a3
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a3, a0
; RV64I-CCMOV-NEXT: lw a3, 0(a1)
; RV64I-CCMOV-NEXT: sext.w a4, a0
; RV64I-CCMOV-NEXT: slt a4, a4, a2
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a0, a2
; RV64I-CCMOV-NEXT: lw a2, 0(a1)
; RV64I-CCMOV-NEXT: sext.w a4, a0
; RV64I-CCMOV-NEXT: slt a4, a3, a4
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a3, a0
; RV64I-CCMOV-NEXT: lw a3, 0(a1)
; RV64I-CCMOV-NEXT: slti a4, a2, 1
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a0, a2
; RV64I-CCMOV-NEXT: slti a4, a2, 0
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a3, a0
; RV64I-CCMOV-NEXT: lw a3, 0(a1)
; RV64I-CCMOV-NEXT: lw a1, 0(a1)
; RV64I-CCMOV-NEXT: slti a4, a3, 1025
; RV64I-CCMOV-NEXT: mips.ccmov a0, a4, a3, a0
; RV64I-CCMOV-NEXT: sltiu a2, a2, 2047
; RV64I-CCMOV-NEXT: mips.ccmov a0, a2, a1, a0
; RV64I-CCMOV-NEXT: sext.w a0, a0
; RV64I-CCMOV-NEXT: ret
  %val1 = load volatile i32, ptr %b
  %tst1 = icmp eq i32 %a, %val1
  %val2 = select i1 %tst1, i32 %a, i32 %val1

  %val3 = load volatile i32, ptr %b
  %tst2 = icmp ne i32 %val2, %val3
  %val4 = select i1 %tst2, i32 %val2, i32 %val3

  %val5 = load volatile i32, ptr %b
  %tst3 = icmp ugt i32 %val4, %val5
  %val6 = select i1 %tst3, i32 %val4, i32 %val5

  %val7 = load volatile i32, ptr %b
  %tst4 = icmp uge i32 %val6, %val7
  %val8 = select i1 %tst4, i32 %val6, i32 %val7

  %val9 = load volatile i32, ptr %b
  %tst5 = icmp ult i32 %val8, %val9
  %val10 = select i1 %tst5, i32 %val8, i32 %val9

  %val11 = load volatile i32, ptr %b
  %tst6 = icmp ule i32 %val10, %val11
  %val12 = select i1 %tst6, i32 %val10, i32 %val11

  %val13 = load volatile i32, ptr %b
  %tst7 = icmp sgt i32 %val12, %val13
  %val14 = select i1 %tst7, i32 %val12, i32 %val13

  %val15 = load volatile i32, ptr %b
  %tst8 = icmp sge i32 %val14, %val15
  %val16 = select i1 %tst8, i32 %val14, i32 %val15

  %val17 = load volatile i32, ptr %b
  %tst9 = icmp slt i32 %val16, %val17
  %val18 = select i1 %tst9, i32 %val16, i32 %val17

  %val19 = load volatile i32, ptr %b
  %tst10 = icmp sle i32 %val18, %val19
  %val20 = select i1 %tst10, i32 %val18, i32 %val19

  %val21 = load volatile i32, ptr %b
  %tst11 = icmp slt i32 %val21, 1
  %val22 = select i1 %tst11, i32 %val20, i32 %val21

  %val23 = load volatile i32, ptr %b
  %tst12 = icmp sgt i32 %val21, -1
  %val24 = select i1 %tst12, i32 %val22, i32 %val23

  %val25 = load volatile i32, ptr %b
  %tst13 = icmp sgt i32 %val25, 1024
  %val26 = select i1 %tst13, i32 %val24, i32 %val25

  %val27 = load volatile i32, ptr %b
  %tst14 = icmp ugt i32 %val21, 2046
  %val28 = select i1 %tst14, i32 %val26, i32 %val27
  ret i32 %val28
}

; Test that we can ComputeNumSignBits across basic blocks when the live out is
; RISCVISD::SELECT_CC. There should be no slli+srai or sext.h in the output.
define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2, i16 signext %3) nounwind {
; RV32I-LABEL: numsignbits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: beqz a0, .LBB1_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: .LBB1_2:
; RV32I-NEXT: beqz a1, .LBB1_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call bar
; RV32I-NEXT: .LBB1_4:
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: numsignbits:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a3
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: beqz a1, .LBB1_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call bar
; RV64I-NEXT: .LBB1_4:
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64I-CCMOV-LABEL: numsignbits:
; RV64I-CCMOV: # %bb.0:
; RV64I-CCMOV-NEXT: addi sp, sp, -16
; RV64I-CCMOV-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-CCMOV-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-CCMOV-NEXT: mips.ccmov s0, a0, a2, a3
; RV64I-CCMOV-NEXT: beqz a1, .LBB1_2
; RV64I-CCMOV-NEXT: # %bb.1:
; RV64I-CCMOV-NEXT: mv a0, s0
; RV64I-CCMOV-NEXT: call bar
; RV64I-CCMOV-NEXT: .LBB1_2:
; RV64I-CCMOV-NEXT: mv a0, s0
; RV64I-CCMOV-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-CCMOV-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-CCMOV-NEXT: addi sp, sp, 16
; RV64I-CCMOV-NEXT: ret
  %5 = icmp eq i16 %0, 0
  %6 = select i1 %5, i16 %3, i16 %2
  %7 = icmp eq i16 %1, 0
  br i1 %7, label %9, label %8

8:                                                ; preds = %4
  tail call void @bar(i16 signext %6)
  br label %9

9:                                                ; preds = %8, %4
  ret i16 %6
}

declare void @bar(i16 signext)

define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z) {
; RV32I-LABEL: select_sge_int16min:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1048560
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: blt a3, a0, .LBB2_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: .LBB2_2:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: select_sge_int16min:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1048560
; RV64I-NEXT: addiw a3, a3, -1
; RV64I-NEXT: blt a3, a0, .LBB2_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV64I-CCMOV-LABEL: select_sge_int16min:
; RV64I-CCMOV: # %bb.0:
; RV64I-CCMOV-NEXT: lui a3, 1048560
; RV64I-CCMOV-NEXT: addiw a3, a3, -1
; RV64I-CCMOV-NEXT: slt a0, a3, a0
; RV64I-CCMOV-NEXT: mips.ccmov a0, a0, a1, a2
; RV64I-CCMOV-NEXT: ret
  %a = icmp sge i32 %x, -65536
  %b = select i1 %a, i32 %y, i32 %z
  ret i32 %b
}

define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
; RV32I-LABEL: select_sge_int32min:
; RV32I: # %bb.0:
; RV32I-NEXT: li a6, -1
; RV32I-NEXT: bne a1, a6, .LBB3_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: j .LBB3_3
; RV32I-NEXT: .LBB3_2:
; RV32I-NEXT: slti a0, a1, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: .LBB3_3:
; RV32I-NEXT: bnez a0, .LBB3_5
; RV32I-NEXT: # %bb.4:
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: .LBB3_5:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: ret
;
; RV64I-LABEL: select_sge_int32min:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 524288
; RV64I-NEXT: addi a3, a3, -1
; RV64I-NEXT: blt a3, a0, .LBB3_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV64I-CCMOV-LABEL: select_sge_int32min:
; RV64I-CCMOV: # %bb.0:
; RV64I-CCMOV-NEXT: lui a3, 524288
; RV64I-CCMOV-NEXT: addi a3, a3, -1
; RV64I-CCMOV-NEXT: slt a0, a3, a0
; RV64I-CCMOV-NEXT: mips.ccmov a0, a0, a1, a2
; RV64I-CCMOV-NEXT: ret
  %a = icmp sge i64 %x, -2147483648
  %b = select i1 %a, i64 %y, i64 %z
  ret i64 %b
}