clang-p2996/llvm/test/CodeGen/RISCV/abds.ll
Ruiling, Song 0487db1f13 MachineScheduler: Improve instruction clustering (#137784)
Clustered nodes were previously managed by adding weak edges between neighbouring cluster nodes, forming a kind of ordered queue, which was later recorded as `NextClusterPred` or `NextClusterSucc` in `ScheduleDAGMI`.

But instructions may not be picked in the exact order of that queue. For example, given a queue of cluster nodes A B C, node B might be picked first during scheduling; in that case it is very likely that only B and C get clustered for top-down scheduling, leaving A alone.

Another issue is that the following code
```
   if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
      std::swap(SUa, SUb);
   if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
```
may break the cluster queue.

For example, suppose we want to cluster nodes 1 3 2 (in `MemOpRecords` order). Normally 1 (SUa) becomes a pred of 3 (SUb). But for the pair (3, 2), since 3 (SUa) > 2 (SUb), the two nodes are reordered, making 2 a pred of 3. Both 1 and 2 then end up as preds of 3, with no edge between 1 and 2, so the cluster chain is broken.

To fix both issues, this change tracks each cluster as an unordered set, which can also improve clustering in some hard cases.
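
A minimal sketch of the idea follows. This is not the actual patch; the names (`ClusterTracker`, `addPair`, `isClusterCandidate`) are made up for illustration. The point is that once cluster membership lives in an unordered set keyed by node, any remaining member of the picked node's cluster is a clustering candidate, regardless of the order in which the pairs were originally recorded.

```cpp
// Hypothetical illustration only -- not LLVM's actual data structures.
#include <unordered_map>
#include <unordered_set>
#include <vector>

using NodeId = unsigned;
using ClusterId = unsigned;

struct ClusterTracker {
  // Each cluster is an unordered set of member nodes.
  std::vector<std::unordered_set<NodeId>> Clusters;
  // Map from node to the cluster it belongs to (if any).
  std::unordered_map<NodeId, ClusterId> NodeToCluster;

  // Record that A and B should be clustered. (The sketch ignores the
  // case where both already belong to different clusters.)
  void addPair(NodeId A, NodeId B) {
    ClusterId C;
    auto It = NodeToCluster.find(A);
    if (It != NodeToCluster.end()) {
      C = It->second;          // Grow A's existing cluster.
    } else {
      C = Clusters.size();     // Start a new cluster containing A.
      Clusters.push_back({A});
      NodeToCluster[A] = C;
    }
    Clusters[C].insert(B);
    NodeToCluster[B] = C;
  }

  // After `Picked` is scheduled, any other member of its cluster is a
  // clustering candidate -- no matter where it sat in the original order.
  bool isClusterCandidate(NodeId Picked, NodeId Next) const {
    auto A = NodeToCluster.find(Picked);
    auto B = NodeToCluster.find(Next);
    return A != NodeToCluster.end() && B != NodeToCluster.end() &&
           A->second == B->second;
  }
};
```

With this shape, the 1 3 2 example above stops being a problem: all three nodes land in one set, so whichever of them is picked first, the remaining two are still recognized as cluster mates.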

One key reason this change causes so many test check updates is that the cluster candidates are no longer ordered, so they may be picked in a different order than before.

The most affected targets are: AMDGPU, AArch64, RISCV.

For RISCV, most of the changes appear to be minor instruction reorderings; I don't see any obvious regression.

For AArch64, some combining of ldr into ldp was affected, with two cases regressed and two improved. The deeper reason is that the machine scheduler cannot cluster them well either before or after the change, and the later load-combine algorithm is also not smart enough.

For AMDGPU, some cases use more v_dual instructions while others regress; this seems less critical. The test `v_vselect_v32bf16` appears to get more buffer_load instructions clause-grouped.
2025-06-05 15:28:04 +08:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 | FileCheck %s --check-prefixes=RV32I
; RUN: llc < %s -mtriple=riscv64 | FileCheck %s --check-prefixes=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV32ZBB
; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s --check-prefixes=ZBB,RV64ZBB
;
; trunc(abs(sub(sext(a),sext(b)))) -> abds(a,b)
;
define i8 @abd_ext_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_ext_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a1, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a1, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_ext_i8:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.b a1, a1
; ZBB-NEXT: sext.b a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%aext = sext i8 %a to i64
%bext = sext i8 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
%trunc = trunc i64 %abs to i8
ret i8 %trunc
}
define i8 @abd_ext_i8_i16(i8 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_ext_i8_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i8_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_ext_i8_i16:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.h a1, a1
; ZBB-NEXT: sext.b a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%aext = sext i8 %a to i64
%bext = sext i16 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
%trunc = trunc i64 %abs to i8
ret i8 %trunc
}
define i8 @abd_ext_i8_undef(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_ext_i8_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a1, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i8_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a1, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_ext_i8_undef:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.b a1, a1
; ZBB-NEXT: sext.b a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%aext = sext i8 %a to i64
%bext = sext i8 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
%trunc = trunc i64 %abs to i8
ret i8 %trunc
}
define i16 @abd_ext_i16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_ext_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_ext_i16:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.h a1, a1
; ZBB-NEXT: sext.h a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%aext = sext i16 %a to i64
%bext = sext i16 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
%trunc = trunc i64 %abs to i16
ret i16 %trunc
}
define i16 @abd_ext_i16_i32(i16 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_ext_i16_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: blt a1, a0, .LBB4_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i16_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i16_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sext.h a0, a0
; RV32ZBB-NEXT: min a2, a0, a1
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i16_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a1
; RV64ZBB-NEXT: sext.h a0, a0
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i16 %a to i64
%bext = sext i32 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
%trunc = trunc i64 %abs to i16
ret i16 %trunc
}
define i16 @abd_ext_i16_undef(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_ext_i16_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i16_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_ext_i16_undef:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.h a1, a1
; ZBB-NEXT: sext.h a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%aext = sext i16 %a to i64
%bext = sext i16 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
%trunc = trunc i64 %abs to i16
ret i16 %trunc
}
define i32 @abd_ext_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_ext_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: blt a1, a0, .LBB6_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB6_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: min a2, a0, a1
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a1
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
%trunc = trunc i64 %abs to i32
ret i32 %trunc
}
define i32 @abd_ext_i32_i16(i32 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_ext_i32_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: blt a1, a0, .LBB7_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB7_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i32_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i32_i16:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sext.h a1, a1
; RV32ZBB-NEXT: min a2, a0, a1
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i32_i16:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: sext.h a1, a1
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i16 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
%trunc = trunc i64 %abs to i32
ret i32 %trunc
}
define i32 @abd_ext_i32_undef(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_ext_i32_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: blt a1, a0, .LBB8_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB8_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i32_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i32_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: min a2, a0, a1
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i32_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a1
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i32 %a to i64
%bext = sext i32 %b to i64
%sub = sub i64 %aext, %bext
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
%trunc = trunc i64 %abs to i32
ret i32 %trunc
}
define i64 @abd_ext_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_ext_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a2, a0
; RV32I-NEXT: mv a5, a4
; RV32I-NEXT: beq a1, a3, .LBB9_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a5, a3, a1
; RV32I-NEXT: .LBB9_2:
; RV32I-NEXT: bnez a5, .LBB9_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB9_4:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: blt a1, a0, .LBB9_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB9_2:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a2, a0
; RV32ZBB-NEXT: mv a5, a4
; RV32ZBB-NEXT: beq a1, a3, .LBB9_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt a5, a3, a1
; RV32ZBB-NEXT: .LBB9_2:
; RV32ZBB-NEXT: bnez a5, .LBB9_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
; RV32ZBB-NEXT: .LBB9_4:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
%sub = sub i128 %aext, %bext
%abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
%trunc = trunc i128 %abs to i64
ret i64 %trunc
}
define i64 @abd_ext_i64_undef(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_ext_i64_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a2, a0
; RV32I-NEXT: mv a5, a4
; RV32I-NEXT: beq a1, a3, .LBB10_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a5, a3, a1
; RV32I-NEXT: .LBB10_2:
; RV32I-NEXT: bnez a5, .LBB10_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB10_4:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i64_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: blt a1, a0, .LBB10_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i64_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a2, a0
; RV32ZBB-NEXT: mv a5, a4
; RV32ZBB-NEXT: beq a1, a3, .LBB10_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt a5, a3, a1
; RV32ZBB-NEXT: .LBB10_2:
; RV32ZBB-NEXT: bnez a5, .LBB10_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
; RV32ZBB-NEXT: .LBB10_4:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i64_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i64 %a to i128
%bext = sext i64 %b to i128
%sub = sub i128 %aext, %bext
%abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
%trunc = trunc i128 %abs to i64
ret i64 %trunc
}
define i128 @abd_ext_i128(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_ext_i128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a5, 8(a1)
; RV32I-NEXT: lw a7, 12(a1)
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: lw a6, 8(a2)
; RV32I-NEXT: lw t1, 12(a2)
; RV32I-NEXT: lw a2, 4(a2)
; RV32I-NEXT: sltu t0, a6, a5
; RV32I-NEXT: mv t4, t0
; RV32I-NEXT: beq a7, t1, .LBB11_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt t4, t1, a7
; RV32I-NEXT: .LBB11_2:
; RV32I-NEXT: sltu t2, a1, a3
; RV32I-NEXT: sltu t5, a2, a4
; RV32I-NEXT: mv t3, t2
; RV32I-NEXT: beq a4, a2, .LBB11_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv t3, t5
; RV32I-NEXT: .LBB11_4:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: xor t6, a7, t1
; RV32I-NEXT: xor s0, a5, a6
; RV32I-NEXT: or t6, s0, t6
; RV32I-NEXT: beqz t6, .LBB11_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv t3, t4
; RV32I-NEXT: .LBB11_6:
; RV32I-NEXT: mv t4, t2
; RV32I-NEXT: beq a2, a4, .LBB11_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv t4, t5
; RV32I-NEXT: .LBB11_8:
; RV32I-NEXT: sltu t5, a3, a1
; RV32I-NEXT: mv t6, t5
; RV32I-NEXT: beq a4, a2, .LBB11_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: sltu t6, a4, a2
; RV32I-NEXT: .LBB11_10:
; RV32I-NEXT: bnez t3, .LBB11_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: sub a7, t1, a7
; RV32I-NEXT: sub a5, a6, a5
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a2, a2, a4
; RV32I-NEXT: sub a4, a7, t0
; RV32I-NEXT: sltu a6, a5, t4
; RV32I-NEXT: sub a3, a2, t2
; RV32I-NEXT: sub a2, a4, a6
; RV32I-NEXT: sub a4, a5, t4
; RV32I-NEXT: j .LBB11_13
; RV32I-NEXT: .LBB11_12:
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: sub a7, a7, t1
; RV32I-NEXT: sub a5, a5, a6
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a4, a4, a2
; RV32I-NEXT: sub a2, a7, t0
; RV32I-NEXT: sltu a6, a5, t6
; RV32I-NEXT: sub a3, a4, t5
; RV32I-NEXT: sub a2, a2, a6
; RV32I-NEXT: sub a4, a5, t6
; RV32I-NEXT: .LBB11_13:
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a4, 8(a0)
; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a2, a0
; RV64I-NEXT: mv a5, a4
; RV64I-NEXT: beq a1, a3, .LBB11_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: slt a5, a3, a1
; RV64I-NEXT: .LBB11_2:
; RV64I-NEXT: bnez a5, .LBB11_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: sub a1, a3, a1
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a2, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB11_4:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a1)
; RV32ZBB-NEXT: lw a4, 4(a1)
; RV32ZBB-NEXT: lw a5, 8(a1)
; RV32ZBB-NEXT: lw a7, 12(a1)
; RV32ZBB-NEXT: lw a1, 0(a2)
; RV32ZBB-NEXT: lw a6, 8(a2)
; RV32ZBB-NEXT: lw t1, 12(a2)
; RV32ZBB-NEXT: lw a2, 4(a2)
; RV32ZBB-NEXT: sltu t0, a6, a5
; RV32ZBB-NEXT: mv t4, t0
; RV32ZBB-NEXT: beq a7, t1, .LBB11_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt t4, t1, a7
; RV32ZBB-NEXT: .LBB11_2:
; RV32ZBB-NEXT: sltu t2, a1, a3
; RV32ZBB-NEXT: sltu t5, a2, a4
; RV32ZBB-NEXT: mv t3, t2
; RV32ZBB-NEXT: beq a4, a2, .LBB11_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: mv t3, t5
; RV32ZBB-NEXT: .LBB11_4:
; RV32ZBB-NEXT: addi sp, sp, -16
; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32ZBB-NEXT: xor t6, a7, t1
; RV32ZBB-NEXT: xor s0, a5, a6
; RV32ZBB-NEXT: or t6, s0, t6
; RV32ZBB-NEXT: beqz t6, .LBB11_6
; RV32ZBB-NEXT: # %bb.5:
; RV32ZBB-NEXT: mv t3, t4
; RV32ZBB-NEXT: .LBB11_6:
; RV32ZBB-NEXT: mv t4, t2
; RV32ZBB-NEXT: beq a2, a4, .LBB11_8
; RV32ZBB-NEXT: # %bb.7:
; RV32ZBB-NEXT: mv t4, t5
; RV32ZBB-NEXT: .LBB11_8:
; RV32ZBB-NEXT: sltu t5, a3, a1
; RV32ZBB-NEXT: mv t6, t5
; RV32ZBB-NEXT: beq a4, a2, .LBB11_10
; RV32ZBB-NEXT: # %bb.9:
; RV32ZBB-NEXT: sltu t6, a4, a2
; RV32ZBB-NEXT: .LBB11_10:
; RV32ZBB-NEXT: bnez t3, .LBB11_12
; RV32ZBB-NEXT: # %bb.11:
; RV32ZBB-NEXT: sub a7, t1, a7
; RV32ZBB-NEXT: sub a5, a6, a5
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a2, a2, a4
; RV32ZBB-NEXT: sub a4, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t4
; RV32ZBB-NEXT: sub a3, a2, t2
; RV32ZBB-NEXT: sub a2, a4, a6
; RV32ZBB-NEXT: sub a4, a5, t4
; RV32ZBB-NEXT: j .LBB11_13
; RV32ZBB-NEXT: .LBB11_12:
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: sub a7, a7, t1
; RV32ZBB-NEXT: sub a5, a5, a6
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a4, a4, a2
; RV32ZBB-NEXT: sub a2, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t6
; RV32ZBB-NEXT: sub a3, a4, t5
; RV32ZBB-NEXT: sub a2, a2, a6
; RV32ZBB-NEXT: sub a4, a5, t6
; RV32ZBB-NEXT: .LBB11_13:
; RV32ZBB-NEXT: sw a1, 0(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a4, 8(a0)
; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32ZBB-NEXT: addi sp, sp, 16
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i128:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a2, a0
; RV64ZBB-NEXT: mv a5, a4
; RV64ZBB-NEXT: beq a1, a3, .LBB11_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: slt a5, a3, a1
; RV64ZBB-NEXT: .LBB11_2:
; RV64ZBB-NEXT: bnez a5, .LBB11_4
; RV64ZBB-NEXT: # %bb.3:
; RV64ZBB-NEXT: sub a1, a3, a1
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a2, a0
; RV64ZBB-NEXT: ret
; RV64ZBB-NEXT: .LBB11_4:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a1, a1, a3
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i128 %a to i256
%bext = sext i128 %b to i256
%sub = sub i256 %aext, %bext
%abs = call i256 @llvm.abs.i256(i256 %sub, i1 false)
%trunc = trunc i256 %abs to i128
ret i128 %trunc
}
define i128 @abd_ext_i128_undef(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_ext_i128_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a5, 8(a1)
; RV32I-NEXT: lw a7, 12(a1)
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: lw a6, 8(a2)
; RV32I-NEXT: lw t1, 12(a2)
; RV32I-NEXT: lw a2, 4(a2)
; RV32I-NEXT: sltu t0, a6, a5
; RV32I-NEXT: mv t4, t0
; RV32I-NEXT: beq a7, t1, .LBB12_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt t4, t1, a7
; RV32I-NEXT: .LBB12_2:
; RV32I-NEXT: sltu t2, a1, a3
; RV32I-NEXT: sltu t5, a2, a4
; RV32I-NEXT: mv t3, t2
; RV32I-NEXT: beq a4, a2, .LBB12_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv t3, t5
; RV32I-NEXT: .LBB12_4:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: xor t6, a7, t1
; RV32I-NEXT: xor s0, a5, a6
; RV32I-NEXT: or t6, s0, t6
; RV32I-NEXT: beqz t6, .LBB12_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv t3, t4
; RV32I-NEXT: .LBB12_6:
; RV32I-NEXT: mv t4, t2
; RV32I-NEXT: beq a2, a4, .LBB12_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv t4, t5
; RV32I-NEXT: .LBB12_8:
; RV32I-NEXT: sltu t5, a3, a1
; RV32I-NEXT: mv t6, t5
; RV32I-NEXT: beq a4, a2, .LBB12_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: sltu t6, a4, a2
; RV32I-NEXT: .LBB12_10:
; RV32I-NEXT: bnez t3, .LBB12_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: sub a7, t1, a7
; RV32I-NEXT: sub a5, a6, a5
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a2, a2, a4
; RV32I-NEXT: sub a4, a7, t0
; RV32I-NEXT: sltu a6, a5, t4
; RV32I-NEXT: sub a3, a2, t2
; RV32I-NEXT: sub a2, a4, a6
; RV32I-NEXT: sub a4, a5, t4
; RV32I-NEXT: j .LBB12_13
; RV32I-NEXT: .LBB12_12:
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: sub a7, a7, t1
; RV32I-NEXT: sub a5, a5, a6
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a4, a4, a2
; RV32I-NEXT: sub a2, a7, t0
; RV32I-NEXT: sltu a6, a5, t6
; RV32I-NEXT: sub a3, a4, t5
; RV32I-NEXT: sub a2, a2, a6
; RV32I-NEXT: sub a4, a5, t6
; RV32I-NEXT: .LBB12_13:
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a4, 8(a0)
; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_ext_i128_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a2, a0
; RV64I-NEXT: mv a5, a4
; RV64I-NEXT: beq a1, a3, .LBB12_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: slt a5, a3, a1
; RV64I-NEXT: .LBB12_2:
; RV64I-NEXT: bnez a5, .LBB12_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: sub a1, a3, a1
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a2, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB12_4:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_ext_i128_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a1)
; RV32ZBB-NEXT: lw a4, 4(a1)
; RV32ZBB-NEXT: lw a5, 8(a1)
; RV32ZBB-NEXT: lw a7, 12(a1)
; RV32ZBB-NEXT: lw a1, 0(a2)
; RV32ZBB-NEXT: lw a6, 8(a2)
; RV32ZBB-NEXT: lw t1, 12(a2)
; RV32ZBB-NEXT: lw a2, 4(a2)
; RV32ZBB-NEXT: sltu t0, a6, a5
; RV32ZBB-NEXT: mv t4, t0
; RV32ZBB-NEXT: beq a7, t1, .LBB12_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt t4, t1, a7
; RV32ZBB-NEXT: .LBB12_2:
; RV32ZBB-NEXT: sltu t2, a1, a3
; RV32ZBB-NEXT: sltu t5, a2, a4
; RV32ZBB-NEXT: mv t3, t2
; RV32ZBB-NEXT: beq a4, a2, .LBB12_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: mv t3, t5
; RV32ZBB-NEXT: .LBB12_4:
; RV32ZBB-NEXT: addi sp, sp, -16
; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32ZBB-NEXT: xor t6, a7, t1
; RV32ZBB-NEXT: xor s0, a5, a6
; RV32ZBB-NEXT: or t6, s0, t6
; RV32ZBB-NEXT: beqz t6, .LBB12_6
; RV32ZBB-NEXT: # %bb.5:
; RV32ZBB-NEXT: mv t3, t4
; RV32ZBB-NEXT: .LBB12_6:
; RV32ZBB-NEXT: mv t4, t2
; RV32ZBB-NEXT: beq a2, a4, .LBB12_8
; RV32ZBB-NEXT: # %bb.7:
; RV32ZBB-NEXT: mv t4, t5
; RV32ZBB-NEXT: .LBB12_8:
; RV32ZBB-NEXT: sltu t5, a3, a1
; RV32ZBB-NEXT: mv t6, t5
; RV32ZBB-NEXT: beq a4, a2, .LBB12_10
; RV32ZBB-NEXT: # %bb.9:
; RV32ZBB-NEXT: sltu t6, a4, a2
; RV32ZBB-NEXT: .LBB12_10:
; RV32ZBB-NEXT: bnez t3, .LBB12_12
; RV32ZBB-NEXT: # %bb.11:
; RV32ZBB-NEXT: sub a7, t1, a7
; RV32ZBB-NEXT: sub a5, a6, a5
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a2, a2, a4
; RV32ZBB-NEXT: sub a4, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t4
; RV32ZBB-NEXT: sub a3, a2, t2
; RV32ZBB-NEXT: sub a2, a4, a6
; RV32ZBB-NEXT: sub a4, a5, t4
; RV32ZBB-NEXT: j .LBB12_13
; RV32ZBB-NEXT: .LBB12_12:
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: sub a7, a7, t1
; RV32ZBB-NEXT: sub a5, a5, a6
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a4, a4, a2
; RV32ZBB-NEXT: sub a2, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t6
; RV32ZBB-NEXT: sub a3, a4, t5
; RV32ZBB-NEXT: sub a2, a2, a6
; RV32ZBB-NEXT: sub a4, a5, t6
; RV32ZBB-NEXT: .LBB12_13:
; RV32ZBB-NEXT: sw a1, 0(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a4, 8(a0)
; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32ZBB-NEXT: addi sp, sp, 16
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_ext_i128_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a2, a0
; RV64ZBB-NEXT: mv a5, a4
; RV64ZBB-NEXT: beq a1, a3, .LBB12_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: slt a5, a3, a1
; RV64ZBB-NEXT: .LBB12_2:
; RV64ZBB-NEXT: bnez a5, .LBB12_4
; RV64ZBB-NEXT: # %bb.3:
; RV64ZBB-NEXT: sub a1, a3, a1
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a2, a0
; RV64ZBB-NEXT: ret
; RV64ZBB-NEXT: .LBB12_4:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a1, a1, a3
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%aext = sext i128 %a to i256
%bext = sext i128 %b to i256
%sub = sub i256 %aext, %bext
%abs = call i256 @llvm.abs.i256(i256 %sub, i1 true)
%trunc = trunc i256 %abs to i128
ret i128 %trunc
}
;
; sub(smax(a,b),smin(a,b)) -> abds(a,b)
;
define i8 @abd_minmax_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_minmax_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a1, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_minmax_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a1, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_minmax_i8:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.b a1, a1
; ZBB-NEXT: sext.b a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%min = call i8 @llvm.smin.i8(i8 %a, i8 %b)
%max = call i8 @llvm.smax.i8(i8 %a, i8 %b)
%sub = sub i8 %max, %min
ret i8 %sub
}
define i16 @abd_minmax_i16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_minmax_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_minmax_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_minmax_i16:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.h a1, a1
; ZBB-NEXT: sext.h a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%min = call i16 @llvm.smin.i16(i16 %a, i16 %b)
%max = call i16 @llvm.smax.i16(i16 %a, i16 %b)
%sub = sub i16 %max, %min
ret i16 %sub
}
define i32 @abd_minmax_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_minmax_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: blt a1, a0, .LBB15_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB15_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_minmax_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_minmax_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: min a2, a0, a1
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_minmax_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a1
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%min = call i32 @llvm.smin.i32(i32 %a, i32 %b)
%max = call i32 @llvm.smax.i32(i32 %a, i32 %b)
%sub = sub i32 %max, %min
ret i32 %sub
}
define i64 @abd_minmax_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_minmax_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a2, a0
; RV32I-NEXT: mv a5, a4
; RV32I-NEXT: beq a1, a3, .LBB16_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a5, a3, a1
; RV32I-NEXT: .LBB16_2:
; RV32I-NEXT: bnez a5, .LBB16_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB16_4:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_minmax_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: blt a1, a0, .LBB16_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB16_2:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_minmax_i64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a2, a0
; RV32ZBB-NEXT: mv a5, a4
; RV32ZBB-NEXT: beq a1, a3, .LBB16_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt a5, a3, a1
; RV32ZBB-NEXT: .LBB16_2:
; RV32ZBB-NEXT: bnez a5, .LBB16_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
; RV32ZBB-NEXT: .LBB16_4:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_minmax_i64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%min = call i64 @llvm.smin.i64(i64 %a, i64 %b)
%max = call i64 @llvm.smax.i64(i64 %a, i64 %b)
%sub = sub i64 %max, %min
ret i64 %sub
}
define i128 @abd_minmax_i128(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_minmax_i128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a5, 8(a1)
; RV32I-NEXT: lw a7, 12(a1)
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: lw a6, 8(a2)
; RV32I-NEXT: lw t1, 12(a2)
; RV32I-NEXT: lw a2, 4(a2)
; RV32I-NEXT: sltu t0, a6, a5
; RV32I-NEXT: mv t4, t0
; RV32I-NEXT: beq a7, t1, .LBB17_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt t4, t1, a7
; RV32I-NEXT: .LBB17_2:
; RV32I-NEXT: sltu t2, a1, a3
; RV32I-NEXT: sltu t5, a2, a4
; RV32I-NEXT: mv t3, t2
; RV32I-NEXT: beq a4, a2, .LBB17_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv t3, t5
; RV32I-NEXT: .LBB17_4:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: xor t6, a7, t1
; RV32I-NEXT: xor s0, a5, a6
; RV32I-NEXT: or t6, s0, t6
; RV32I-NEXT: beqz t6, .LBB17_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv t3, t4
; RV32I-NEXT: .LBB17_6:
; RV32I-NEXT: mv t4, t2
; RV32I-NEXT: beq a2, a4, .LBB17_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv t4, t5
; RV32I-NEXT: .LBB17_8:
; RV32I-NEXT: sltu t5, a3, a1
; RV32I-NEXT: mv t6, t5
; RV32I-NEXT: beq a4, a2, .LBB17_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: sltu t6, a4, a2
; RV32I-NEXT: .LBB17_10:
; RV32I-NEXT: bnez t3, .LBB17_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: sub a7, t1, a7
; RV32I-NEXT: sub a5, a6, a5
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a2, a2, a4
; RV32I-NEXT: sub a4, a7, t0
; RV32I-NEXT: sltu a6, a5, t4
; RV32I-NEXT: sub a3, a2, t2
; RV32I-NEXT: sub a2, a4, a6
; RV32I-NEXT: sub a4, a5, t4
; RV32I-NEXT: j .LBB17_13
; RV32I-NEXT: .LBB17_12:
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: sub a7, a7, t1
; RV32I-NEXT: sub a5, a5, a6
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a4, a4, a2
; RV32I-NEXT: sub a2, a7, t0
; RV32I-NEXT: sltu a6, a5, t6
; RV32I-NEXT: sub a3, a4, t5
; RV32I-NEXT: sub a2, a2, a6
; RV32I-NEXT: sub a4, a5, t6
; RV32I-NEXT: .LBB17_13:
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a4, 8(a0)
; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_minmax_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a2, a0
; RV64I-NEXT: mv a5, a4
; RV64I-NEXT: beq a1, a3, .LBB17_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: slt a5, a3, a1
; RV64I-NEXT: .LBB17_2:
; RV64I-NEXT: bnez a5, .LBB17_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: sub a1, a3, a1
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a2, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB17_4:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_minmax_i128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a1)
; RV32ZBB-NEXT: lw a4, 4(a1)
; RV32ZBB-NEXT: lw a5, 8(a1)
; RV32ZBB-NEXT: lw a7, 12(a1)
; RV32ZBB-NEXT: lw a1, 0(a2)
; RV32ZBB-NEXT: lw a6, 8(a2)
; RV32ZBB-NEXT: lw t1, 12(a2)
; RV32ZBB-NEXT: lw a2, 4(a2)
; RV32ZBB-NEXT: sltu t0, a6, a5
; RV32ZBB-NEXT: mv t4, t0
; RV32ZBB-NEXT: beq a7, t1, .LBB17_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt t4, t1, a7
; RV32ZBB-NEXT: .LBB17_2:
; RV32ZBB-NEXT: sltu t2, a1, a3
; RV32ZBB-NEXT: sltu t5, a2, a4
; RV32ZBB-NEXT: mv t3, t2
; RV32ZBB-NEXT: beq a4, a2, .LBB17_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: mv t3, t5
; RV32ZBB-NEXT: .LBB17_4:
; RV32ZBB-NEXT: addi sp, sp, -16
; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32ZBB-NEXT: xor t6, a7, t1
; RV32ZBB-NEXT: xor s0, a5, a6
; RV32ZBB-NEXT: or t6, s0, t6
; RV32ZBB-NEXT: beqz t6, .LBB17_6
; RV32ZBB-NEXT: # %bb.5:
; RV32ZBB-NEXT: mv t3, t4
; RV32ZBB-NEXT: .LBB17_6:
; RV32ZBB-NEXT: mv t4, t2
; RV32ZBB-NEXT: beq a2, a4, .LBB17_8
; RV32ZBB-NEXT: # %bb.7:
; RV32ZBB-NEXT: mv t4, t5
; RV32ZBB-NEXT: .LBB17_8:
; RV32ZBB-NEXT: sltu t5, a3, a1
; RV32ZBB-NEXT: mv t6, t5
; RV32ZBB-NEXT: beq a4, a2, .LBB17_10
; RV32ZBB-NEXT: # %bb.9:
; RV32ZBB-NEXT: sltu t6, a4, a2
; RV32ZBB-NEXT: .LBB17_10:
; RV32ZBB-NEXT: bnez t3, .LBB17_12
; RV32ZBB-NEXT: # %bb.11:
; RV32ZBB-NEXT: sub a7, t1, a7
; RV32ZBB-NEXT: sub a5, a6, a5
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a2, a2, a4
; RV32ZBB-NEXT: sub a4, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t4
; RV32ZBB-NEXT: sub a3, a2, t2
; RV32ZBB-NEXT: sub a2, a4, a6
; RV32ZBB-NEXT: sub a4, a5, t4
; RV32ZBB-NEXT: j .LBB17_13
; RV32ZBB-NEXT: .LBB17_12:
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: sub a7, a7, t1
; RV32ZBB-NEXT: sub a5, a5, a6
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a4, a4, a2
; RV32ZBB-NEXT: sub a2, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t6
; RV32ZBB-NEXT: sub a3, a4, t5
; RV32ZBB-NEXT: sub a2, a2, a6
; RV32ZBB-NEXT: sub a4, a5, t6
; RV32ZBB-NEXT: .LBB17_13:
; RV32ZBB-NEXT: sw a1, 0(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a4, 8(a0)
; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32ZBB-NEXT: addi sp, sp, 16
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_minmax_i128:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a2, a0
; RV64ZBB-NEXT: mv a5, a4
; RV64ZBB-NEXT: beq a1, a3, .LBB17_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: slt a5, a3, a1
; RV64ZBB-NEXT: .LBB17_2:
; RV64ZBB-NEXT: bnez a5, .LBB17_4
; RV64ZBB-NEXT: # %bb.3:
; RV64ZBB-NEXT: sub a1, a3, a1
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a2, a0
; RV64ZBB-NEXT: ret
; RV64ZBB-NEXT: .LBB17_4:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a1, a1, a3
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%min = call i128 @llvm.smin.i128(i128 %a, i128 %b)
%max = call i128 @llvm.smax.i128(i128 %a, i128 %b)
%sub = sub i128 %max, %min
ret i128 %sub
}
;
; select(icmp(a,b),sub(a,b),sub(b,a)) -> abds(a,b)
;
define i8 @abd_cmp_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_cmp_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a1, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_cmp_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a1, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_cmp_i8:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.b a1, a1
; ZBB-NEXT: sext.b a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%cmp = icmp sgt i8 %a, %b
%ab = sub i8 %a, %b
%ba = sub i8 %b, %a
%sel = select i1 %cmp, i8 %ab, i8 %ba
ret i8 %sel
}
define i16 @abd_cmp_i16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_cmp_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_cmp_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_cmp_i16:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.h a1, a1
; ZBB-NEXT: sext.h a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%cmp = icmp sge i16 %a, %b
%ab = sub i16 %a, %b
%ba = sub i16 %b, %a
%sel = select i1 %cmp, i16 %ab, i16 %ba
ret i16 %sel
}
define i32 @abd_cmp_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_cmp_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: blt a1, a0, .LBB20_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB20_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_cmp_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_cmp_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: min a2, a0, a1
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_cmp_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a1
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%cmp = icmp slt i32 %a, %b
%ab = sub i32 %a, %b
%ba = sub i32 %b, %a
%sel = select i1 %cmp, i32 %ba, i32 %ab
ret i32 %sel
}
define i64 @abd_cmp_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_cmp_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a2, a0
; RV32I-NEXT: mv a5, a4
; RV32I-NEXT: beq a1, a3, .LBB21_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a5, a3, a1
; RV32I-NEXT: .LBB21_2:
; RV32I-NEXT: bnez a5, .LBB21_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB21_4:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_cmp_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: blt a1, a0, .LBB21_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB21_2:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_cmp_i64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a2, a0
; RV32ZBB-NEXT: mv a5, a4
; RV32ZBB-NEXT: beq a1, a3, .LBB21_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt a5, a3, a1
; RV32ZBB-NEXT: .LBB21_2:
; RV32ZBB-NEXT: bnez a5, .LBB21_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
; RV32ZBB-NEXT: .LBB21_4:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_cmp_i64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%cmp = icmp sge i64 %a, %b
%ab = sub i64 %a, %b
%ba = sub i64 %b, %a
%sel = select i1 %cmp, i64 %ab, i64 %ba
ret i64 %sel
}
define i128 @abd_cmp_i128(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_cmp_i128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a5, 8(a1)
; RV32I-NEXT: lw a7, 12(a1)
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: lw a6, 8(a2)
; RV32I-NEXT: lw t1, 12(a2)
; RV32I-NEXT: lw a2, 4(a2)
; RV32I-NEXT: sltu t0, a6, a5
; RV32I-NEXT: mv t4, t0
; RV32I-NEXT: beq a7, t1, .LBB22_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt t4, t1, a7
; RV32I-NEXT: .LBB22_2:
; RV32I-NEXT: sltu t2, a1, a3
; RV32I-NEXT: sltu t5, a2, a4
; RV32I-NEXT: mv t3, t2
; RV32I-NEXT: beq a4, a2, .LBB22_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv t3, t5
; RV32I-NEXT: .LBB22_4:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: xor t6, a7, t1
; RV32I-NEXT: xor s0, a5, a6
; RV32I-NEXT: or t6, s0, t6
; RV32I-NEXT: beqz t6, .LBB22_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv t3, t4
; RV32I-NEXT: .LBB22_6:
; RV32I-NEXT: mv t4, t2
; RV32I-NEXT: beq a2, a4, .LBB22_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv t4, t5
; RV32I-NEXT: .LBB22_8:
; RV32I-NEXT: sltu t5, a3, a1
; RV32I-NEXT: mv t6, t5
; RV32I-NEXT: beq a4, a2, .LBB22_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: sltu t6, a4, a2
; RV32I-NEXT: .LBB22_10:
; RV32I-NEXT: bnez t3, .LBB22_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: sub a7, t1, a7
; RV32I-NEXT: sub a5, a6, a5
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a2, a2, a4
; RV32I-NEXT: sub a4, a7, t0
; RV32I-NEXT: sltu a6, a5, t4
; RV32I-NEXT: sub a3, a2, t2
; RV32I-NEXT: sub a2, a4, a6
; RV32I-NEXT: sub a4, a5, t4
; RV32I-NEXT: j .LBB22_13
; RV32I-NEXT: .LBB22_12:
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: sub a7, a7, t1
; RV32I-NEXT: sub a5, a5, a6
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a4, a4, a2
; RV32I-NEXT: sub a2, a7, t0
; RV32I-NEXT: sltu a6, a5, t6
; RV32I-NEXT: sub a3, a4, t5
; RV32I-NEXT: sub a2, a2, a6
; RV32I-NEXT: sub a4, a5, t6
; RV32I-NEXT: .LBB22_13:
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a4, 8(a0)
; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_cmp_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a2, a0
; RV64I-NEXT: mv a5, a4
; RV64I-NEXT: beq a1, a3, .LBB22_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: slt a5, a3, a1
; RV64I-NEXT: .LBB22_2:
; RV64I-NEXT: bnez a5, .LBB22_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: sub a1, a3, a1
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a2, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB22_4:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_cmp_i128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a1)
; RV32ZBB-NEXT: lw a4, 4(a1)
; RV32ZBB-NEXT: lw a5, 8(a1)
; RV32ZBB-NEXT: lw a7, 12(a1)
; RV32ZBB-NEXT: lw a1, 0(a2)
; RV32ZBB-NEXT: lw a6, 8(a2)
; RV32ZBB-NEXT: lw t1, 12(a2)
; RV32ZBB-NEXT: lw a2, 4(a2)
; RV32ZBB-NEXT: sltu t0, a6, a5
; RV32ZBB-NEXT: mv t4, t0
; RV32ZBB-NEXT: beq a7, t1, .LBB22_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt t4, t1, a7
; RV32ZBB-NEXT: .LBB22_2:
; RV32ZBB-NEXT: sltu t2, a1, a3
; RV32ZBB-NEXT: sltu t5, a2, a4
; RV32ZBB-NEXT: mv t3, t2
; RV32ZBB-NEXT: beq a4, a2, .LBB22_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: mv t3, t5
; RV32ZBB-NEXT: .LBB22_4:
; RV32ZBB-NEXT: addi sp, sp, -16
; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32ZBB-NEXT: xor t6, a7, t1
; RV32ZBB-NEXT: xor s0, a5, a6
; RV32ZBB-NEXT: or t6, s0, t6
; RV32ZBB-NEXT: beqz t6, .LBB22_6
; RV32ZBB-NEXT: # %bb.5:
; RV32ZBB-NEXT: mv t3, t4
; RV32ZBB-NEXT: .LBB22_6:
; RV32ZBB-NEXT: mv t4, t2
; RV32ZBB-NEXT: beq a2, a4, .LBB22_8
; RV32ZBB-NEXT: # %bb.7:
; RV32ZBB-NEXT: mv t4, t5
; RV32ZBB-NEXT: .LBB22_8:
; RV32ZBB-NEXT: sltu t5, a3, a1
; RV32ZBB-NEXT: mv t6, t5
; RV32ZBB-NEXT: beq a4, a2, .LBB22_10
; RV32ZBB-NEXT: # %bb.9:
; RV32ZBB-NEXT: sltu t6, a4, a2
; RV32ZBB-NEXT: .LBB22_10:
; RV32ZBB-NEXT: bnez t3, .LBB22_12
; RV32ZBB-NEXT: # %bb.11:
; RV32ZBB-NEXT: sub a7, t1, a7
; RV32ZBB-NEXT: sub a5, a6, a5
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a2, a2, a4
; RV32ZBB-NEXT: sub a4, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t4
; RV32ZBB-NEXT: sub a3, a2, t2
; RV32ZBB-NEXT: sub a2, a4, a6
; RV32ZBB-NEXT: sub a4, a5, t4
; RV32ZBB-NEXT: j .LBB22_13
; RV32ZBB-NEXT: .LBB22_12:
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: sub a7, a7, t1
; RV32ZBB-NEXT: sub a5, a5, a6
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a4, a4, a2
; RV32ZBB-NEXT: sub a2, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t6
; RV32ZBB-NEXT: sub a3, a4, t5
; RV32ZBB-NEXT: sub a2, a2, a6
; RV32ZBB-NEXT: sub a4, a5, t6
; RV32ZBB-NEXT: .LBB22_13:
; RV32ZBB-NEXT: sw a1, 0(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a4, 8(a0)
; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32ZBB-NEXT: addi sp, sp, 16
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_cmp_i128:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a2, a0
; RV64ZBB-NEXT: mv a5, a4
; RV64ZBB-NEXT: beq a1, a3, .LBB22_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: slt a5, a3, a1
; RV64ZBB-NEXT: .LBB22_2:
; RV64ZBB-NEXT: bnez a5, .LBB22_4
; RV64ZBB-NEXT: # %bb.3:
; RV64ZBB-NEXT: sub a1, a3, a1
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a2, a0
; RV64ZBB-NEXT: ret
; RV64ZBB-NEXT: .LBB22_4:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a1, a1, a3
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%cmp = icmp sge i128 %a, %b
%ab = sub i128 %a, %b
%ba = sub i128 %b, %a
%sel = select i1 %cmp, i128 %ab, i128 %ba
ret i128 %sel
}
;
; abs(sub_nsw(x, y)) -> abds(a,b)
;
define i8 @abd_subnsw_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: slli a1, a0, 24
; RV32I-NEXT: srai a1, a1, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: slli a1, a0, 56
; RV64I-NEXT: srai a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i8:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sub a0, a0, a1
; RV32ZBB-NEXT: sext.b a0, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i8:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: subw a0, a0, a1
; RV64ZBB-NEXT: sext.b a0, a0
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i8 %a, %b
%abs = call i8 @llvm.abs.i8(i8 %sub, i1 false)
ret i8 %abs
}
define i8 @abd_subnsw_i8_undef(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i8_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: slli a1, a0, 24
; RV32I-NEXT: srai a1, a1, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i8_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: slli a1, a0, 56
; RV64I-NEXT: srai a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i8_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sub a0, a0, a1
; RV32ZBB-NEXT: sext.b a0, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i8_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: subw a0, a0, a1
; RV64ZBB-NEXT: sext.b a0, a0
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i8 %a, %b
%abs = call i8 @llvm.abs.i8(i8 %sub, i1 true)
ret i8 %abs
}
define i16 @abd_subnsw_i16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: slli a1, a0, 16
; RV32I-NEXT: srai a1, a1, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: slli a1, a0, 48
; RV64I-NEXT: srai a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i16:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sub a0, a0, a1
; RV32ZBB-NEXT: sext.h a0, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i16:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: subw a0, a0, a1
; RV64ZBB-NEXT: sext.h a0, a0
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i16 %a, %b
%abs = call i16 @llvm.abs.i16(i16 %sub, i1 false)
ret i16 %abs
}
define i16 @abd_subnsw_i16_undef(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i16_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: slli a1, a0, 16
; RV32I-NEXT: srai a1, a1, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i16_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: slli a1, a0, 48
; RV64I-NEXT: srai a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i16_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sub a0, a0, a1
; RV32ZBB-NEXT: sext.h a0, a0
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i16_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: subw a0, a0, a1
; RV64ZBB-NEXT: sext.h a0, a0
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i16 %a, %b
%abs = call i16 @llvm.abs.i16(i16 %sub, i1 true)
ret i16 %abs
}
define i32 @abd_subnsw_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sub a0, a0, a1
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: subw a0, a0, a1
; RV64ZBB-NEXT: negw a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i32 %a, %b
%abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
ret i32 %abs
}
define i32 @abd_subnsw_i32_undef(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i32_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i32_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i32_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sub a0, a0, a1
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i32_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: subw a0, a0, a1
; RV64ZBB-NEXT: negw a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i32 %a, %b
%abs = call i32 @llvm.abs.i32(i32 %sub, i1 true)
ret i32 %abs
}
define i64 @abd_subnsw_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: bgez a1, .LBB29_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: .LBB29_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: bgez a1, .LBB29_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: add a1, a1, a2
; RV32ZBB-NEXT: neg a1, a1
; RV32ZBB-NEXT: neg a0, a0
; RV32ZBB-NEXT: .LBB29_2:
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sub a0, a0, a1
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i64 %a, %b
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 false)
ret i64 %abs
}
define i64 @abd_subnsw_i64_undef(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i64_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: bgez a1, .LBB30_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: snez a2, a0
; RV32I-NEXT: add a1, a1, a2
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: .LBB30_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i64_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i64_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: bgez a1, .LBB30_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: snez a2, a0
; RV32ZBB-NEXT: add a1, a1, a2
; RV32ZBB-NEXT: neg a1, a1
; RV32ZBB-NEXT: neg a0, a0
; RV32ZBB-NEXT: .LBB30_2:
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i64_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sub a0, a0, a1
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub nsw i64 %a, %b
%abs = call i64 @llvm.abs.i64(i64 %sub, i1 true)
ret i64 %abs
}
define i128 @abd_subnsw_i128(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a4, 0(a2)
; RV32I-NEXT: lw a3, 4(a2)
; RV32I-NEXT: lw a5, 8(a2)
; RV32I-NEXT: lw a6, 12(a2)
; RV32I-NEXT: lw a7, 8(a1)
; RV32I-NEXT: lw t0, 12(a1)
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: lw a1, 4(a1)
; RV32I-NEXT: sltu t1, a7, a5
; RV32I-NEXT: sub t0, t0, a6
; RV32I-NEXT: sltu a6, a2, a4
; RV32I-NEXT: sub t0, t0, t1
; RV32I-NEXT: mv t1, a6
; RV32I-NEXT: beq a1, a3, .LBB31_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu t1, a1, a3
; RV32I-NEXT: .LBB31_2:
; RV32I-NEXT: sub a5, a7, a5
; RV32I-NEXT: sub a3, a1, a3
; RV32I-NEXT: sltu a1, a5, t1
; RV32I-NEXT: sub a5, a5, t1
; RV32I-NEXT: sub a1, t0, a1
; RV32I-NEXT: sub a3, a3, a6
; RV32I-NEXT: sub a2, a2, a4
; RV32I-NEXT: bgez a1, .LBB31_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: snez a4, a3
; RV32I-NEXT: snez a6, a2
; RV32I-NEXT: neg a7, a5
; RV32I-NEXT: snez a5, a5
; RV32I-NEXT: neg a2, a2
; RV32I-NEXT: or a4, a6, a4
; RV32I-NEXT: add a1, a1, a5
; RV32I-NEXT: add a3, a3, a6
; RV32I-NEXT: sltu a6, a7, a4
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: sub a5, a7, a4
; RV32I-NEXT: sub a1, a1, a6
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB31_4:
; RV32I-NEXT: sw a2, 0(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a5, 8(a0)
; RV32I-NEXT: sw a1, 12(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: bgez a1, .LBB31_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: snez a2, a0
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: neg a1, a1
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: .LBB31_2:
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a4, 0(a2)
; RV32ZBB-NEXT: lw a3, 4(a2)
; RV32ZBB-NEXT: lw a5, 8(a2)
; RV32ZBB-NEXT: lw a6, 12(a2)
; RV32ZBB-NEXT: lw a7, 8(a1)
; RV32ZBB-NEXT: lw t0, 12(a1)
; RV32ZBB-NEXT: lw a2, 0(a1)
; RV32ZBB-NEXT: lw a1, 4(a1)
; RV32ZBB-NEXT: sltu t1, a7, a5
; RV32ZBB-NEXT: sub t0, t0, a6
; RV32ZBB-NEXT: sltu a6, a2, a4
; RV32ZBB-NEXT: sub t0, t0, t1
; RV32ZBB-NEXT: mv t1, a6
; RV32ZBB-NEXT: beq a1, a3, .LBB31_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu t1, a1, a3
; RV32ZBB-NEXT: .LBB31_2:
; RV32ZBB-NEXT: sub a5, a7, a5
; RV32ZBB-NEXT: sub a3, a1, a3
; RV32ZBB-NEXT: sltu a1, a5, t1
; RV32ZBB-NEXT: sub a5, a5, t1
; RV32ZBB-NEXT: sub a1, t0, a1
; RV32ZBB-NEXT: sub a3, a3, a6
; RV32ZBB-NEXT: sub a2, a2, a4
; RV32ZBB-NEXT: bgez a1, .LBB31_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: snez a4, a3
; RV32ZBB-NEXT: snez a6, a2
; RV32ZBB-NEXT: neg a7, a5
; RV32ZBB-NEXT: snez a5, a5
; RV32ZBB-NEXT: neg a2, a2
; RV32ZBB-NEXT: or a4, a6, a4
; RV32ZBB-NEXT: add a1, a1, a5
; RV32ZBB-NEXT: add a3, a3, a6
; RV32ZBB-NEXT: sltu a6, a7, a4
; RV32ZBB-NEXT: neg a1, a1
; RV32ZBB-NEXT: sub a5, a7, a4
; RV32ZBB-NEXT: sub a1, a1, a6
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB31_4:
; RV32ZBB-NEXT: sw a2, 0(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a5, 8(a0)
; RV32ZBB-NEXT: sw a1, 12(a0)
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i128:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a1, a1, a3
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: bgez a1, .LBB31_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: snez a2, a0
; RV64ZBB-NEXT: add a1, a1, a2
; RV64ZBB-NEXT: neg a1, a1
; RV64ZBB-NEXT: neg a0, a0
; RV64ZBB-NEXT: .LBB31_2:
; RV64ZBB-NEXT: ret
%sub = sub nsw i128 %a, %b
%abs = call i128 @llvm.abs.i128(i128 %sub, i1 false)
ret i128 %abs
}
define i128 @abd_subnsw_i128_undef(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_subnsw_i128_undef:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a4, 0(a2)
; RV32I-NEXT: lw a3, 4(a2)
; RV32I-NEXT: lw a5, 8(a2)
; RV32I-NEXT: lw a6, 12(a2)
; RV32I-NEXT: lw a7, 8(a1)
; RV32I-NEXT: lw t0, 12(a1)
; RV32I-NEXT: lw a2, 0(a1)
; RV32I-NEXT: lw a1, 4(a1)
; RV32I-NEXT: sltu t1, a7, a5
; RV32I-NEXT: sub t0, t0, a6
; RV32I-NEXT: sltu a6, a2, a4
; RV32I-NEXT: sub t0, t0, t1
; RV32I-NEXT: mv t1, a6
; RV32I-NEXT: beq a1, a3, .LBB32_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu t1, a1, a3
; RV32I-NEXT: .LBB32_2:
; RV32I-NEXT: sub a5, a7, a5
; RV32I-NEXT: sub a3, a1, a3
; RV32I-NEXT: sltu a1, a5, t1
; RV32I-NEXT: sub a5, a5, t1
; RV32I-NEXT: sub a1, t0, a1
; RV32I-NEXT: sub a3, a3, a6
; RV32I-NEXT: sub a2, a2, a4
; RV32I-NEXT: bgez a1, .LBB32_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: snez a4, a3
; RV32I-NEXT: snez a6, a2
; RV32I-NEXT: neg a7, a5
; RV32I-NEXT: snez a5, a5
; RV32I-NEXT: neg a2, a2
; RV32I-NEXT: or a4, a6, a4
; RV32I-NEXT: add a1, a1, a5
; RV32I-NEXT: add a3, a3, a6
; RV32I-NEXT: sltu a6, a7, a4
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: sub a5, a7, a4
; RV32I-NEXT: sub a1, a1, a6
; RV32I-NEXT: neg a3, a3
; RV32I-NEXT: .LBB32_4:
; RV32I-NEXT: sw a2, 0(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a5, 8(a0)
; RV32I-NEXT: sw a1, 12(a0)
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_subnsw_i128_undef:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: bgez a1, .LBB32_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: snez a2, a0
; RV64I-NEXT: add a1, a1, a2
; RV64I-NEXT: neg a1, a1
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: .LBB32_2:
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_subnsw_i128_undef:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a4, 0(a2)
; RV32ZBB-NEXT: lw a3, 4(a2)
; RV32ZBB-NEXT: lw a5, 8(a2)
; RV32ZBB-NEXT: lw a6, 12(a2)
; RV32ZBB-NEXT: lw a7, 8(a1)
; RV32ZBB-NEXT: lw t0, 12(a1)
; RV32ZBB-NEXT: lw a2, 0(a1)
; RV32ZBB-NEXT: lw a1, 4(a1)
; RV32ZBB-NEXT: sltu t1, a7, a5
; RV32ZBB-NEXT: sub t0, t0, a6
; RV32ZBB-NEXT: sltu a6, a2, a4
; RV32ZBB-NEXT: sub t0, t0, t1
; RV32ZBB-NEXT: mv t1, a6
; RV32ZBB-NEXT: beq a1, a3, .LBB32_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: sltu t1, a1, a3
; RV32ZBB-NEXT: .LBB32_2:
; RV32ZBB-NEXT: sub a5, a7, a5
; RV32ZBB-NEXT: sub a3, a1, a3
; RV32ZBB-NEXT: sltu a1, a5, t1
; RV32ZBB-NEXT: sub a5, a5, t1
; RV32ZBB-NEXT: sub a1, t0, a1
; RV32ZBB-NEXT: sub a3, a3, a6
; RV32ZBB-NEXT: sub a2, a2, a4
; RV32ZBB-NEXT: bgez a1, .LBB32_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: snez a4, a3
; RV32ZBB-NEXT: snez a6, a2
; RV32ZBB-NEXT: neg a7, a5
; RV32ZBB-NEXT: snez a5, a5
; RV32ZBB-NEXT: neg a2, a2
; RV32ZBB-NEXT: or a4, a6, a4
; RV32ZBB-NEXT: add a1, a1, a5
; RV32ZBB-NEXT: add a3, a3, a6
; RV32ZBB-NEXT: sltu a6, a7, a4
; RV32ZBB-NEXT: neg a1, a1
; RV32ZBB-NEXT: sub a5, a7, a4
; RV32ZBB-NEXT: sub a1, a1, a6
; RV32ZBB-NEXT: neg a3, a3
; RV32ZBB-NEXT: .LBB32_4:
; RV32ZBB-NEXT: sw a2, 0(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a5, 8(a0)
; RV32ZBB-NEXT: sw a1, 12(a0)
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_subnsw_i128_undef:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a1, a1, a3
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: bgez a1, .LBB32_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: snez a2, a0
; RV64ZBB-NEXT: add a1, a1, a2
; RV64ZBB-NEXT: neg a1, a1
; RV64ZBB-NEXT: neg a0, a0
; RV64ZBB-NEXT: .LBB32_2:
; RV64ZBB-NEXT: ret
%sub = sub nsw i128 %a, %b
%abs = call i128 @llvm.abs.i128(i128 %sub, i1 true)
ret i128 %abs
}
;
; negative tests - sub without nsw may wrap, so abs(sub(a,b)) cannot be converted to abds(a,b)
;
define i32 @abd_sub_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_sub_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_sub_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: subw a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_sub_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sub a0, a0, a1
; RV32ZBB-NEXT: neg a1, a0
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_sub_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: subw a0, a0, a1
; RV64ZBB-NEXT: negw a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
%sub = sub i32 %a, %b
%abs = call i32 @llvm.abs.i32(i32 %sub, i1 false)
ret i32 %abs
}
;
; sub(select(icmp(a,b),a,b),select(icmp(a,b),b,a)) -> abds(a,b)
;
define i8 @abd_select_i8(i8 %a, i8 %b) nounwind {
; RV32I-LABEL: abd_select_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 24
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: srai a1, a1, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 56
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: srai a1, a1, 56
; RV64I-NEXT: srai a0, a0, 56
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_select_i8:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.b a1, a1
; ZBB-NEXT: sext.b a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%cmp = icmp slt i8 %a, %b
%ab = select i1 %cmp, i8 %a, i8 %b
%ba = select i1 %cmp, i8 %b, i8 %a
%sub = sub i8 %ba, %ab
ret i8 %sub
}
define i16 @abd_select_i16(i16 %a, i16 %b) nounwind {
; RV32I-LABEL: abd_select_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a1, a1, 16
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a1, 48
; RV64I-NEXT: slli a0, a0, 48
; RV64I-NEXT: srai a1, a1, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; ZBB-LABEL: abd_select_i16:
; ZBB: # %bb.0:
; ZBB-NEXT: sext.h a1, a1
; ZBB-NEXT: sext.h a0, a0
; ZBB-NEXT: min a2, a0, a1
; ZBB-NEXT: max a0, a0, a1
; ZBB-NEXT: sub a0, a0, a2
; ZBB-NEXT: ret
%cmp = icmp sle i16 %a, %b
%ab = select i1 %cmp, i16 %a, i16 %b
%ba = select i1 %cmp, i16 %b, i16 %a
%sub = sub i16 %ba, %ab
ret i16 %sub
}
define i32 @abd_select_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: abd_select_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: blt a1, a0, .LBB36_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sub a0, a1, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB36_2:
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: sext.w a1, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_select_i32:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: min a2, a0, a1
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_select_i32:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sext.w a1, a1
; RV64ZBB-NEXT: sext.w a0, a0
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%cmp = icmp sgt i32 %a, %b
%ab = select i1 %cmp, i32 %a, i32 %b
%ba = select i1 %cmp, i32 %b, i32 %a
%sub = sub i32 %ab, %ba
ret i32 %sub
}
define i64 @abd_select_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: abd_select_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: sltu a4, a2, a0
; RV32I-NEXT: mv a5, a4
; RV32I-NEXT: beq a1, a3, .LBB37_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a5, a3, a1
; RV32I-NEXT: .LBB37_2:
; RV32I-NEXT: bnez a5, .LBB37_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a2, a0
; RV32I-NEXT: ret
; RV32I-NEXT: .LBB37_4:
; RV32I-NEXT: sltu a4, a0, a2
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a1, a1, a4
; RV32I-NEXT: sub a0, a0, a2
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: blt a1, a0, .LBB37_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: sub a0, a1, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB37_2:
; RV64I-NEXT: sub a0, a0, a1
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_select_i64:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: sltu a4, a2, a0
; RV32ZBB-NEXT: mv a5, a4
; RV32ZBB-NEXT: beq a1, a3, .LBB37_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt a5, a3, a1
; RV32ZBB-NEXT: .LBB37_2:
; RV32ZBB-NEXT: bnez a5, .LBB37_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
; RV32ZBB-NEXT: .LBB37_4:
; RV32ZBB-NEXT: sltu a4, a0, a2
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a1, a1, a4
; RV32ZBB-NEXT: sub a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_select_i64:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: min a2, a0, a1
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%cmp = icmp sge i64 %a, %b
%ab = select i1 %cmp, i64 %a, i64 %b
%ba = select i1 %cmp, i64 %b, i64 %a
%sub = sub i64 %ab, %ba
ret i64 %sub
}
define i128 @abd_select_i128(i128 %a, i128 %b) nounwind {
; RV32I-LABEL: abd_select_i128:
; RV32I: # %bb.0:
; RV32I-NEXT: lw a3, 0(a1)
; RV32I-NEXT: lw a4, 4(a1)
; RV32I-NEXT: lw a5, 8(a1)
; RV32I-NEXT: lw a7, 12(a1)
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: lw a6, 8(a2)
; RV32I-NEXT: lw t1, 12(a2)
; RV32I-NEXT: lw a2, 4(a2)
; RV32I-NEXT: sltu t0, a6, a5
; RV32I-NEXT: mv t4, t0
; RV32I-NEXT: beq a7, t1, .LBB38_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt t4, t1, a7
; RV32I-NEXT: .LBB38_2:
; RV32I-NEXT: sltu t2, a1, a3
; RV32I-NEXT: sltu t5, a2, a4
; RV32I-NEXT: mv t3, t2
; RV32I-NEXT: beq a4, a2, .LBB38_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv t3, t5
; RV32I-NEXT: .LBB38_4:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: xor t6, a7, t1
; RV32I-NEXT: xor s0, a5, a6
; RV32I-NEXT: or t6, s0, t6
; RV32I-NEXT: beqz t6, .LBB38_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv t3, t4
; RV32I-NEXT: .LBB38_6:
; RV32I-NEXT: mv t4, t2
; RV32I-NEXT: beq a2, a4, .LBB38_8
; RV32I-NEXT: # %bb.7:
; RV32I-NEXT: mv t4, t5
; RV32I-NEXT: .LBB38_8:
; RV32I-NEXT: sltu t5, a3, a1
; RV32I-NEXT: mv t6, t5
; RV32I-NEXT: beq a4, a2, .LBB38_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: sltu t6, a4, a2
; RV32I-NEXT: .LBB38_10:
; RV32I-NEXT: bnez t3, .LBB38_12
; RV32I-NEXT: # %bb.11:
; RV32I-NEXT: sub a7, t1, a7
; RV32I-NEXT: sub a5, a6, a5
; RV32I-NEXT: sub a1, a1, a3
; RV32I-NEXT: sub a2, a2, a4
; RV32I-NEXT: sub a4, a7, t0
; RV32I-NEXT: sltu a6, a5, t4
; RV32I-NEXT: sub a3, a2, t2
; RV32I-NEXT: sub a2, a4, a6
; RV32I-NEXT: sub a4, a5, t4
; RV32I-NEXT: j .LBB38_13
; RV32I-NEXT: .LBB38_12:
; RV32I-NEXT: sltu t0, a5, a6
; RV32I-NEXT: sub a7, a7, t1
; RV32I-NEXT: sub a5, a5, a6
; RV32I-NEXT: sub a1, a3, a1
; RV32I-NEXT: sub a4, a4, a2
; RV32I-NEXT: sub a2, a7, t0
; RV32I-NEXT: sltu a6, a5, t6
; RV32I-NEXT: sub a3, a4, t5
; RV32I-NEXT: sub a2, a2, a6
; RV32I-NEXT: sub a4, a5, t6
; RV32I-NEXT: .LBB38_13:
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: sw a3, 4(a0)
; RV32I-NEXT: sw a4, 8(a0)
; RV32I-NEXT: sw a2, 12(a0)
; RV32I-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: abd_select_i128:
; RV64I: # %bb.0:
; RV64I-NEXT: sltu a4, a2, a0
; RV64I-NEXT: mv a5, a4
; RV64I-NEXT: beq a1, a3, .LBB38_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: slt a5, a3, a1
; RV64I-NEXT: .LBB38_2:
; RV64I-NEXT: bnez a5, .LBB38_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: sub a1, a3, a1
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a2, a0
; RV64I-NEXT: ret
; RV64I-NEXT: .LBB38_4:
; RV64I-NEXT: sltu a4, a0, a2
; RV64I-NEXT: sub a1, a1, a3
; RV64I-NEXT: sub a1, a1, a4
; RV64I-NEXT: sub a0, a0, a2
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: abd_select_i128:
; RV32ZBB: # %bb.0:
; RV32ZBB-NEXT: lw a3, 0(a1)
; RV32ZBB-NEXT: lw a4, 4(a1)
; RV32ZBB-NEXT: lw a5, 8(a1)
; RV32ZBB-NEXT: lw a7, 12(a1)
; RV32ZBB-NEXT: lw a1, 0(a2)
; RV32ZBB-NEXT: lw a6, 8(a2)
; RV32ZBB-NEXT: lw t1, 12(a2)
; RV32ZBB-NEXT: lw a2, 4(a2)
; RV32ZBB-NEXT: sltu t0, a6, a5
; RV32ZBB-NEXT: mv t4, t0
; RV32ZBB-NEXT: beq a7, t1, .LBB38_2
; RV32ZBB-NEXT: # %bb.1:
; RV32ZBB-NEXT: slt t4, t1, a7
; RV32ZBB-NEXT: .LBB38_2:
; RV32ZBB-NEXT: sltu t2, a1, a3
; RV32ZBB-NEXT: sltu t5, a2, a4
; RV32ZBB-NEXT: mv t3, t2
; RV32ZBB-NEXT: beq a4, a2, .LBB38_4
; RV32ZBB-NEXT: # %bb.3:
; RV32ZBB-NEXT: mv t3, t5
; RV32ZBB-NEXT: .LBB38_4:
; RV32ZBB-NEXT: addi sp, sp, -16
; RV32ZBB-NEXT: sw s0, 12(sp) # 4-byte Folded Spill
; RV32ZBB-NEXT: xor t6, a7, t1
; RV32ZBB-NEXT: xor s0, a5, a6
; RV32ZBB-NEXT: or t6, s0, t6
; RV32ZBB-NEXT: beqz t6, .LBB38_6
; RV32ZBB-NEXT: # %bb.5:
; RV32ZBB-NEXT: mv t3, t4
; RV32ZBB-NEXT: .LBB38_6:
; RV32ZBB-NEXT: mv t4, t2
; RV32ZBB-NEXT: beq a2, a4, .LBB38_8
; RV32ZBB-NEXT: # %bb.7:
; RV32ZBB-NEXT: mv t4, t5
; RV32ZBB-NEXT: .LBB38_8:
; RV32ZBB-NEXT: sltu t5, a3, a1
; RV32ZBB-NEXT: mv t6, t5
; RV32ZBB-NEXT: beq a4, a2, .LBB38_10
; RV32ZBB-NEXT: # %bb.9:
; RV32ZBB-NEXT: sltu t6, a4, a2
; RV32ZBB-NEXT: .LBB38_10:
; RV32ZBB-NEXT: bnez t3, .LBB38_12
; RV32ZBB-NEXT: # %bb.11:
; RV32ZBB-NEXT: sub a7, t1, a7
; RV32ZBB-NEXT: sub a5, a6, a5
; RV32ZBB-NEXT: sub a1, a1, a3
; RV32ZBB-NEXT: sub a2, a2, a4
; RV32ZBB-NEXT: sub a4, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t4
; RV32ZBB-NEXT: sub a3, a2, t2
; RV32ZBB-NEXT: sub a2, a4, a6
; RV32ZBB-NEXT: sub a4, a5, t4
; RV32ZBB-NEXT: j .LBB38_13
; RV32ZBB-NEXT: .LBB38_12:
; RV32ZBB-NEXT: sltu t0, a5, a6
; RV32ZBB-NEXT: sub a7, a7, t1
; RV32ZBB-NEXT: sub a5, a5, a6
; RV32ZBB-NEXT: sub a1, a3, a1
; RV32ZBB-NEXT: sub a4, a4, a2
; RV32ZBB-NEXT: sub a2, a7, t0
; RV32ZBB-NEXT: sltu a6, a5, t6
; RV32ZBB-NEXT: sub a3, a4, t5
; RV32ZBB-NEXT: sub a2, a2, a6
; RV32ZBB-NEXT: sub a4, a5, t6
; RV32ZBB-NEXT: .LBB38_13:
; RV32ZBB-NEXT: sw a1, 0(a0)
; RV32ZBB-NEXT: sw a3, 4(a0)
; RV32ZBB-NEXT: sw a4, 8(a0)
; RV32ZBB-NEXT: sw a2, 12(a0)
; RV32ZBB-NEXT: lw s0, 12(sp) # 4-byte Folded Reload
; RV32ZBB-NEXT: addi sp, sp, 16
; RV32ZBB-NEXT: ret
;
; RV64ZBB-LABEL: abd_select_i128:
; RV64ZBB: # %bb.0:
; RV64ZBB-NEXT: sltu a4, a2, a0
; RV64ZBB-NEXT: mv a5, a4
; RV64ZBB-NEXT: beq a1, a3, .LBB38_2
; RV64ZBB-NEXT: # %bb.1:
; RV64ZBB-NEXT: slt a5, a3, a1
; RV64ZBB-NEXT: .LBB38_2:
; RV64ZBB-NEXT: bnez a5, .LBB38_4
; RV64ZBB-NEXT: # %bb.3:
; RV64ZBB-NEXT: sub a1, a3, a1
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a2, a0
; RV64ZBB-NEXT: ret
; RV64ZBB-NEXT: .LBB38_4:
; RV64ZBB-NEXT: sltu a4, a0, a2
; RV64ZBB-NEXT: sub a1, a1, a3
; RV64ZBB-NEXT: sub a1, a1, a4
; RV64ZBB-NEXT: sub a0, a0, a2
; RV64ZBB-NEXT: ret
%cmp = icmp slt i128 %a, %b
%ab = select i1 %cmp, i128 %a, i128 %b
%ba = select i1 %cmp, i128 %b, i128 %a
%sub = sub i128 %ba, %ab
ret i128 %sub
}
declare i8 @llvm.abs.i8(i8, i1)
declare i16 @llvm.abs.i16(i16, i1)
declare i32 @llvm.abs.i32(i32, i1)
declare i64 @llvm.abs.i64(i64, i1)
declare i128 @llvm.abs.i128(i128, i1)
declare i8 @llvm.smax.i8(i8, i8)
declare i16 @llvm.smax.i16(i16, i16)
declare i32 @llvm.smax.i32(i32, i32)
declare i64 @llvm.smax.i64(i64, i64)
declare i8 @llvm.smin.i8(i8, i8)
declare i16 @llvm.smin.i16(i16, i16)
declare i32 @llvm.smin.i32(i32, i32)
declare i64 @llvm.smin.i64(i64, i64)