; This converts MULW to MUL if the upper bits aren't used. This will give
; more opportunities to use c.mul with Zcb.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv32 -mattr=+xtheadbb -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32XTHEADBB
; RUN: llc -mtriple=riscv32 -mattr=+xtheadmac -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32XTHEADMAC
; RUN: llc -mtriple=riscv32 -mattr=+xtheadmac -mattr=+xtheadbb -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32XTHEAD
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zbb -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64ZBB
; RUN: llc -mtriple=riscv64 -mattr=+xtheadmac -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64XTHEADMAC
; RUN: llc -mtriple=riscv64 -mattr=+xtheadbb -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64XTHEADBB
; RUN: llc -mtriple=riscv64 -mattr=+xtheadmac -mattr=+xtheadbb -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64XTHEAD
; (A*B) feeds only lshr/and uses of its low bits, so the multiply result
; needs no sign extension; on RV64 this lets the i32 multiplies lower to
; plain `mul` rather than `mulw` (visible in the RV64* check lines below).
; The inner product of two masked values is also narrow, so XTHEADBB can
; use th.extu for the masks and XTHEADMAC can fold the final mul+add into
; th.mulah.
define i32 @f(i32 %A, i32 %B, i32 %C) {
; RV32I-LABEL: f:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: mul a0, a1, a0
; RV32I-NEXT: slli a1, a0, 26
; RV32I-NEXT: srli a1, a1, 28
; RV32I-NEXT: slli a0, a0, 20
; RV32I-NEXT: srli a0, a0, 25
; RV32I-NEXT: mul a0, a1, a0
; RV32I-NEXT: add a0, a0, a2
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: f:
; RV32ZBB: # %bb.0: # %entry
; RV32ZBB-NEXT: mul a0, a1, a0
; RV32ZBB-NEXT: slli a1, a0, 26
; RV32ZBB-NEXT: srli a1, a1, 28
; RV32ZBB-NEXT: slli a0, a0, 20
; RV32ZBB-NEXT: srli a0, a0, 25
; RV32ZBB-NEXT: mul a0, a1, a0
; RV32ZBB-NEXT: add a0, a0, a2
; RV32ZBB-NEXT: ret
;
; RV32XTHEADBB-LABEL: f:
; RV32XTHEADBB: # %bb.0: # %entry
; RV32XTHEADBB-NEXT: mul a0, a1, a0
; RV32XTHEADBB-NEXT: th.extu a1, a0, 5, 2
; RV32XTHEADBB-NEXT: th.extu a0, a0, 11, 5
; RV32XTHEADBB-NEXT: mul a0, a1, a0
; RV32XTHEADBB-NEXT: add a0, a0, a2
; RV32XTHEADBB-NEXT: ret
;
; RV32XTHEADMAC-LABEL: f:
; RV32XTHEADMAC: # %bb.0: # %entry
; RV32XTHEADMAC-NEXT: mul a0, a1, a0
; RV32XTHEADMAC-NEXT: slli a1, a0, 26
; RV32XTHEADMAC-NEXT: srli a1, a1, 28
; RV32XTHEADMAC-NEXT: slli a0, a0, 20
; RV32XTHEADMAC-NEXT: srli a0, a0, 25
; RV32XTHEADMAC-NEXT: th.mulah a2, a1, a0
; RV32XTHEADMAC-NEXT: mv a0, a2
; RV32XTHEADMAC-NEXT: ret
;
; RV32XTHEAD-LABEL: f:
; RV32XTHEAD: # %bb.0: # %entry
; RV32XTHEAD-NEXT: mul a0, a1, a0
; RV32XTHEAD-NEXT: th.extu a1, a0, 5, 2
; RV32XTHEAD-NEXT: th.extu a0, a0, 11, 5
; RV32XTHEAD-NEXT: th.mulah a2, a1, a0
; RV32XTHEAD-NEXT: mv a0, a2
; RV32XTHEAD-NEXT: ret
;
; RV64I-LABEL: f:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: slli a1, a0, 58
; RV64I-NEXT: srli a1, a1, 60
; RV64I-NEXT: slli a0, a0, 52
; RV64I-NEXT: srli a0, a0, 57
; RV64I-NEXT: mul a0, a1, a0
; RV64I-NEXT: addw a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: f:
; RV64ZBB: # %bb.0: # %entry
; RV64ZBB-NEXT: mul a0, a1, a0
; RV64ZBB-NEXT: slli a1, a0, 58
; RV64ZBB-NEXT: srli a1, a1, 60
; RV64ZBB-NEXT: slli a0, a0, 52
; RV64ZBB-NEXT: srli a0, a0, 57
; RV64ZBB-NEXT: mul a0, a1, a0
; RV64ZBB-NEXT: addw a0, a0, a2
; RV64ZBB-NEXT: ret
;
; RV64XTHEADMAC-LABEL: f:
; RV64XTHEADMAC: # %bb.0: # %entry
; RV64XTHEADMAC-NEXT: mul a0, a1, a0
; RV64XTHEADMAC-NEXT: slli a1, a0, 58
; RV64XTHEADMAC-NEXT: srli a1, a1, 60
; RV64XTHEADMAC-NEXT: slli a0, a0, 52
; RV64XTHEADMAC-NEXT: srli a0, a0, 57
; RV64XTHEADMAC-NEXT: th.mulah a2, a1, a0
; RV64XTHEADMAC-NEXT: mv a0, a2
; RV64XTHEADMAC-NEXT: ret
;
; RV64XTHEADBB-LABEL: f:
; RV64XTHEADBB: # %bb.0: # %entry
; RV64XTHEADBB-NEXT: mul a0, a1, a0
; RV64XTHEADBB-NEXT: th.extu a1, a0, 5, 2
; RV64XTHEADBB-NEXT: th.extu a0, a0, 11, 5
; RV64XTHEADBB-NEXT: mul a0, a1, a0
; RV64XTHEADBB-NEXT: addw a0, a0, a2
; RV64XTHEADBB-NEXT: ret
;
; RV64XTHEAD-LABEL: f:
; RV64XTHEAD: # %bb.0: # %entry
; RV64XTHEAD-NEXT: mul a0, a1, a0
; RV64XTHEAD-NEXT: th.extu a1, a0, 5, 2
; RV64XTHEAD-NEXT: th.extu a0, a0, 11, 5
; RV64XTHEAD-NEXT: th.mulah a2, a1, a0
; RV64XTHEAD-NEXT: mv a0, a2
; RV64XTHEAD-NEXT: ret
entry:
  %mul = mul nsw i32 %B, %A
  ; Extract bits [5:2] of the product (mask is 15 after shifting right by 2).
  %0 = lshr i32 %mul, 2
  %and = and i32 %0, 15
  ; Extract bits [11:5] of the product (mask is 127 after shifting right by 5).
  %1 = lshr i32 %mul, 5
  %and2 = and i32 %1, 127
  ; Narrow operands (<= 4 and 7 bits) make this second multiply narrow too.
  %mul3 = mul nuw nsw i32 %and, %and2
  %add = add i32 %mul3, %C
  ret i32 %add
}