clang-p2996/llvm/test/CodeGen/RISCV/select-cc.ll
Craig Topper 6227b7ae31 [RISCV] Move xori creation for scalar setccs to lowering.
This patch enables expansion or custom lowering for some integer
condition codes so that any xori that is needed is created before
the last DAG combine runs, giving that combine a chance to optimize it.

I've seen cases where we end up with
(or (xori (setcc), 1), (xori (setcc), 1)), which we would ideally
convert to (xori (and (setcc), (setcc)), 1). This patch doesn't
accomplish that yet, but it should allow us to add such DAG
combines as follow-ups. Example: https://godbolt.org/z/Y4qnvsq1b

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D131729
2022-08-19 13:51:53 -07:00
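
As a minimal sketch of that pattern (an illustrative function, not the exact case from the godbolt link): RISC-V has no sle instruction, so each icmp sle lowers to an slt whose result is inverted with xori, and or'ing the two inverted results gives the (or (xori ...), (xori ...)) shape that De Morgan's law would fold into a single trailing xori:

define i1 @or_of_inverted_setccs(i32 %a, i32 %b, i32 %c, i32 %d) {
  ; Each sle becomes (xori (setcc sgt), 1) during lowering.
  %t1 = icmp sle i32 %a, %b
  %t2 = icmp sle i32 %c, %d
  ; Ideally combined to (xori (and (setcc), (setcc)), 1): one xori, not two.
  %r = or i1 %t1, %t2
  ret i1 %r
}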

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32ZBT %s
; RUN: llc -mtriple=riscv64 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64ZBT %s
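
; Chains selects through every integer condition code (eq, ne, ugt, uge,
; ult, ule, sgt, sge, slt, sle) and through compares against the constants
; 1, -1, 1024, and 2046; the volatile loads keep every compare live.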
define signext i32 @foo(i32 signext %a, i32 *%b) nounwind {
; RV32I-LABEL: foo:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: beq a0, a2, .LBB0_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bne a0, a2, .LBB0_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_4:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bltu a2, a0, .LBB0_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_6:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bgeu a0, a2, .LBB0_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_8:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bltu a0, a2, .LBB0_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_10:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bgeu a2, a0, .LBB0_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_12:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blt a2, a0, .LBB0_14
; RV32I-NEXT: # %bb.13:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_14:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bge a0, a2, .LBB0_16
; RV32I-NEXT: # %bb.15:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_16:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blt a0, a2, .LBB0_18
; RV32I-NEXT: # %bb.17:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_18:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: bge a2, a0, .LBB0_20
; RV32I-NEXT: # %bb.19:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_20:
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: blez a2, .LBB0_22
; RV32I-NEXT: # %bb.21:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_22:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: bgez a2, .LBB0_24
; RV32I-NEXT: # %bb.23:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB0_24:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: li a4, 1024
; RV32I-NEXT: blt a4, a3, .LBB0_26
; RV32I-NEXT: # %bb.25:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB0_26:
; RV32I-NEXT: lw a1, 0(a1)
; RV32I-NEXT: li a3, 2046
; RV32I-NEXT: bltu a3, a2, .LBB0_28
; RV32I-NEXT: # %bb.27:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: .LBB0_28:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: foo:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: xor a4, a0, a2
; RV32ZBT-NEXT: cmov a0, a4, a2, a0
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: xor a4, a0, a3
; RV32ZBT-NEXT: cmov a0, a4, a0, a3
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: sltu a4, a2, a0
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: sltu a4, a0, a3
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: sltu a4, a0, a2
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: sltu a4, a3, a0
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: slt a4, a2, a0
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: slt a4, a0, a3
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: slt a4, a0, a2
; RV32ZBT-NEXT: lw a5, 0(a1)
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: slt a2, a3, a0
; RV32ZBT-NEXT: cmov a0, a2, a3, a0
; RV32ZBT-NEXT: slti a2, a5, 1
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: cmov a0, a2, a0, a5
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: slti a4, a5, 0
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a1, 0(a1)
; RV32ZBT-NEXT: slti a3, a2, 1025
; RV32ZBT-NEXT: cmov a0, a3, a2, a0
; RV32ZBT-NEXT: sltiu a2, a5, 2047
; RV32ZBT-NEXT: cmov a0, a2, a1, a0
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: foo:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: beq a0, a2, .LBB0_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bne a0, a2, .LBB0_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_4:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bltu a2, a0, .LBB0_6
; RV64I-NEXT: # %bb.5:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_6:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bgeu a0, a2, .LBB0_8
; RV64I-NEXT: # %bb.7:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_8:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bltu a0, a2, .LBB0_10
; RV64I-NEXT: # %bb.9:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_10:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: bgeu a2, a0, .LBB0_12
; RV64I-NEXT: # %bb.11:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_12:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: sext.w a3, a0
; RV64I-NEXT: blt a2, a3, .LBB0_14
; RV64I-NEXT: # %bb.13:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_14:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: sext.w a3, a0
; RV64I-NEXT: bge a3, a2, .LBB0_16
; RV64I-NEXT: # %bb.15:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_16:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: sext.w a3, a0
; RV64I-NEXT: blt a3, a2, .LBB0_18
; RV64I-NEXT: # %bb.17:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_18:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: sext.w a3, a0
; RV64I-NEXT: bge a2, a3, .LBB0_20
; RV64I-NEXT: # %bb.19:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_20:
; RV64I-NEXT: lw a2, 0(a1)
; RV64I-NEXT: blez a2, .LBB0_22
; RV64I-NEXT: # %bb.21:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB0_22:
; RV64I-NEXT: lw a3, 0(a1)
; RV64I-NEXT: bgez a2, .LBB0_24
; RV64I-NEXT: # %bb.23:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB0_24:
; RV64I-NEXT: lw a3, 0(a1)
; RV64I-NEXT: li a4, 1024
; RV64I-NEXT: blt a4, a3, .LBB0_26
; RV64I-NEXT: # %bb.25:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB0_26:
; RV64I-NEXT: lw a1, 0(a1)
; RV64I-NEXT: li a3, 2046
; RV64I-NEXT: bltu a3, a2, .LBB0_28
; RV64I-NEXT: # %bb.27:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB0_28:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: foo:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: xor a4, a0, a2
; RV64ZBT-NEXT: cmov a0, a4, a2, a0
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: xor a4, a0, a3
; RV64ZBT-NEXT: cmov a0, a4, a0, a3
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: sltu a4, a2, a0
; RV64ZBT-NEXT: cmov a0, a4, a0, a2
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: sltu a4, a0, a3
; RV64ZBT-NEXT: cmov a0, a4, a3, a0
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: sltu a4, a0, a2
; RV64ZBT-NEXT: cmov a0, a4, a0, a2
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: sltu a4, a3, a0
; RV64ZBT-NEXT: cmov a0, a4, a3, a0
; RV64ZBT-NEXT: sext.w a3, a0
; RV64ZBT-NEXT: slt a3, a2, a3
; RV64ZBT-NEXT: lw a4, 0(a1)
; RV64ZBT-NEXT: cmov a0, a3, a0, a2
; RV64ZBT-NEXT: sext.w a2, a0
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: slt a2, a2, a4
; RV64ZBT-NEXT: cmov a0, a2, a4, a0
; RV64ZBT-NEXT: sext.w a2, a0
; RV64ZBT-NEXT: slt a2, a2, a3
; RV64ZBT-NEXT: lw a4, 0(a1)
; RV64ZBT-NEXT: cmov a0, a2, a0, a3
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: sext.w a3, a0
; RV64ZBT-NEXT: slt a3, a4, a3
; RV64ZBT-NEXT: cmov a0, a3, a4, a0
; RV64ZBT-NEXT: slti a3, a2, 1
; RV64ZBT-NEXT: lw a4, 0(a1)
; RV64ZBT-NEXT: cmov a0, a3, a0, a2
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: slti a5, a2, 0
; RV64ZBT-NEXT: cmov a0, a5, a4, a0
; RV64ZBT-NEXT: lw a1, 0(a1)
; RV64ZBT-NEXT: slti a4, a3, 1025
; RV64ZBT-NEXT: cmov a0, a4, a3, a0
; RV64ZBT-NEXT: sltiu a2, a2, 2047
; RV64ZBT-NEXT: cmov a0, a2, a1, a0
; RV64ZBT-NEXT: sext.w a0, a0
; RV64ZBT-NEXT: ret
%val1 = load volatile i32, i32* %b
%tst1 = icmp eq i32 %a, %val1
%val2 = select i1 %tst1, i32 %a, i32 %val1
%val3 = load volatile i32, i32* %b
%tst2 = icmp ne i32 %val2, %val3
%val4 = select i1 %tst2, i32 %val2, i32 %val3
%val5 = load volatile i32, i32* %b
%tst3 = icmp ugt i32 %val4, %val5
%val6 = select i1 %tst3, i32 %val4, i32 %val5
%val7 = load volatile i32, i32* %b
%tst4 = icmp uge i32 %val6, %val7
%val8 = select i1 %tst4, i32 %val6, i32 %val7
%val9 = load volatile i32, i32* %b
%tst5 = icmp ult i32 %val8, %val9
%val10 = select i1 %tst5, i32 %val8, i32 %val9
%val11 = load volatile i32, i32* %b
%tst6 = icmp ule i32 %val10, %val11
%val12 = select i1 %tst6, i32 %val10, i32 %val11
%val13 = load volatile i32, i32* %b
%tst7 = icmp sgt i32 %val12, %val13
%val14 = select i1 %tst7, i32 %val12, i32 %val13
%val15 = load volatile i32, i32* %b
%tst8 = icmp sge i32 %val14, %val15
%val16 = select i1 %tst8, i32 %val14, i32 %val15
%val17 = load volatile i32, i32* %b
%tst9 = icmp slt i32 %val16, %val17
%val18 = select i1 %tst9, i32 %val16, i32 %val17
%val19 = load volatile i32, i32* %b
%tst10 = icmp sle i32 %val18, %val19
%val20 = select i1 %tst10, i32 %val18, i32 %val19
%val21 = load volatile i32, i32* %b
%tst11 = icmp slt i32 %val21, 1
%val22 = select i1 %tst11, i32 %val20, i32 %val21
%val23 = load volatile i32, i32* %b
%tst12 = icmp sgt i32 %val21, -1
%val24 = select i1 %tst12, i32 %val22, i32 %val23
%val25 = load volatile i32, i32* %b
%tst13 = icmp sgt i32 %val25, 1024
%val26 = select i1 %tst13, i32 %val24, i32 %val25
%val27 = load volatile i32, i32* %b
%tst14 = icmp ugt i32 %val21, 2046
%val28 = select i1 %tst14, i32 %val26, i32 %val27
ret i32 %val28
}

; Test that we can ComputeNumSignBits across basic blocks when the live out is
; RISCVISD::SELECT_CC. There should be no slli+srai or sext.h in the output.
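; (Without Zbb, sign-extending an i16 expands to slli+srai by 16 on RV32 or
; by 48 on RV64; sext.h is the Zbb form. Neither should be needed here.)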
define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2, i16 signext %3) nounwind {
; RV32I-LABEL: numsignbits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a3
; RV32I-NEXT: beqz a0, .LBB1_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv s0, a2
; RV32I-NEXT: .LBB1_2:
; RV32I-NEXT: beqz a1, .LBB1_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call bar@plt
; RV32I-NEXT: .LBB1_4:
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: numsignbits:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: addi sp, sp, -16
; RV32ZBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBT-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZBT-NEXT: cmov s0, a0, a2, a3
; RV32ZBT-NEXT: beqz a1, .LBB1_2
; RV32ZBT-NEXT: # %bb.1:
; RV32ZBT-NEXT: mv a0, s0
; RV32ZBT-NEXT: call bar@plt
; RV32ZBT-NEXT: .LBB1_2:
; RV32ZBT-NEXT: mv a0, s0
; RV32ZBT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBT-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZBT-NEXT: addi sp, sp, 16
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: numsignbits:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a3
; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv s0, a2
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: beqz a1, .LBB1_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call bar@plt
; RV64I-NEXT: .LBB1_4:
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: numsignbits:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: addi sp, sp, -16
; RV64ZBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZBT-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZBT-NEXT: cmov s0, a0, a2, a3
; RV64ZBT-NEXT: beqz a1, .LBB1_2
; RV64ZBT-NEXT: # %bb.1:
; RV64ZBT-NEXT: mv a0, s0
; RV64ZBT-NEXT: call bar@plt
; RV64ZBT-NEXT: .LBB1_2:
; RV64ZBT-NEXT: mv a0, s0
; RV64ZBT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZBT-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZBT-NEXT: addi sp, sp, 16
; RV64ZBT-NEXT: ret
%5 = icmp eq i16 %0, 0
%6 = select i1 %5, i16 %3, i16 %2
%7 = icmp eq i16 %1, 0
br i1 %7, label %9, label %8
8: ; preds = %4
tail call void @bar(i16 signext %6)
br label %9
9: ; preds = %8, %4
ret i16 %6
}

declare void @bar(i16 signext)

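; icmp sge with a constant is canonicalized to sgt against constant-1, so
; x >= -65536 is checked as x > -65537, with -65537 (0xfffeffff)
; materialized by lui 1048560 + addi -1.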
define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z) {
; RV32I-LABEL: select_sge_int16min:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1048560
; RV32I-NEXT: addi a3, a3, -1
; RV32I-NEXT: blt a3, a0, .LBB2_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: .LBB2_2:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: select_sge_int16min:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: lui a3, 1048560
; RV32ZBT-NEXT: addi a3, a3, -1
; RV32ZBT-NEXT: slt a0, a3, a0
; RV32ZBT-NEXT: cmov a0, a0, a1, a2
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_sge_int16min:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1048560
; RV64I-NEXT: addiw a3, a3, -1
; RV64I-NEXT: blt a3, a0, .LBB2_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: select_sge_int16min:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: lui a3, 1048560
; RV64ZBT-NEXT: addiw a3, a3, -1
; RV64ZBT-NEXT: slt a0, a3, a0
; RV64ZBT-NEXT: cmov a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%a = icmp sge i32 %x, -65536
%b = select i1 %a, i32 %y, i32 %z
ret i32 %b
}
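
; On RV32, the i64 compare x >= INT32_MIN splits on the high word: when the
; high word is exactly -1 the result is the sign bit of the low word
; (slti lo, 0); otherwise it is simply high >= 0.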
define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
; RV32I-LABEL: select_sge_int32min:
; RV32I: # %bb.0:
; RV32I-NEXT: li a6, -1
; RV32I-NEXT: bne a1, a6, .LBB3_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slti a0, a0, 0
; RV32I-NEXT: j .LBB3_3
; RV32I-NEXT: .LBB3_2:
; RV32I-NEXT: slti a0, a1, 0
; RV32I-NEXT: xori a0, a0, 1
; RV32I-NEXT: .LBB3_3:
; RV32I-NEXT: bnez a0, .LBB3_5
; RV32I-NEXT: # %bb.4:
; RV32I-NEXT: mv a2, a4
; RV32I-NEXT: mv a3, a5
; RV32I-NEXT: .LBB3_5:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: select_sge_int32min:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a0, a0, 0
; RV32ZBT-NEXT: addi a6, a1, 1
; RV32ZBT-NEXT: slti a1, a1, 0
; RV32ZBT-NEXT: xori a1, a1, 1
; RV32ZBT-NEXT: cmov a1, a6, a1, a0
; RV32ZBT-NEXT: cmov a0, a1, a2, a4
; RV32ZBT-NEXT: cmov a1, a1, a3, a5
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_sge_int32min:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 524288
; RV64I-NEXT: addi a3, a3, -1
; RV64I-NEXT: blt a3, a0, .LBB3_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: select_sge_int32min:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: lui a3, 524288
; RV64ZBT-NEXT: addi a3, a3, -1
; RV64ZBT-NEXT: slt a0, a3, a0
; RV64ZBT-NEXT: cmov a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%a = icmp sge i64 %x, -2147483648
%b = select i1 %a, i64 %y, i64 %z
ret i64 %b
}