[RISCV] Add pre-emit pass to make more instructions compressible

When optimizing for size, this pass searches for instructions that are
prevented from being compressed by one of the following:

1. The use of a single uncompressed register.
2. A base register + offset where the offset is too large to be
   compressed and the base register may or may not already be compressed.

In the first case, if there is a compressed register available, then the
uncompressed register is copied to the compressed register and its uses
replaced. This is only done if there are enough uses that code size
would be improved.

In the second case, if a compressed register is available, then the
original base register is copied and adjusted such that:

new_base_register = base_register + adjustment
base_register + large_offset = new_base_register + small_offset

and the uses of the base register are replaced with the new base
register. Again this is only done if there are enough uses for code size
to be improved.

This pass was authored by Lewis Revill, with large offset optimization
added by Craig Blackmore.

Differential Revision: https://reviews.llvm.org/D92105
This commit is contained in:
Lewis Revill
2022-04-25 12:24:09 +01:00
parent b3c5c22c13
commit 29a5a7c6d4
8 changed files with 1867 additions and 1 deletions

View File

@@ -21,6 +21,7 @@ add_public_tablegen_target(RISCVCommonTableGen)
add_llvm_target(RISCVCodeGen
RISCVAsmPrinter.cpp
RISCVCallLowering.cpp
RISCVMakeCompressible.cpp
RISCVExpandAtomicPseudoInsts.cpp
RISCVExpandPseudoInsts.cpp
RISCVFrameLowering.cpp

View File

@@ -37,6 +37,9 @@ bool lowerRISCVMachineOperandToMCOperand(const MachineOperand &MO,
FunctionPass *createRISCVISelDag(RISCVTargetMachine &TM);
FunctionPass *createRISCVMakeCompressibleOptPass();
void initializeRISCVMakeCompressibleOptPass(PassRegistry &);
FunctionPass *createRISCVGatherScatterLoweringPass();
void initializeRISCVGatherScatterLoweringPass(PassRegistry &);

View File

@@ -0,0 +1,382 @@
//===-- RISCVMakeCompressible.cpp - Make more instructions compressible ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass searches for instructions that are prevented from being compressed
// by one of the following:
//
// 1. The use of a single uncompressed register.
// 2. A base register + offset where the offset is too large to be compressed
// and the base register may or may not be compressed.
//
//
// For case 1, if a compressed register is available, then the uncompressed
// register is copied to the compressed register and its uses are replaced.
//
// For example, storing zero uses the uncompressible zero register:
// sw zero, 0(a0) # if zero
// sw zero, 8(a0) # if zero
// sw zero, 4(a0) # if zero
// sw zero, 24(a0) # if zero
//
// If a compressed register (e.g. a1) is available, the above can be transformed
// to the following to improve code size:
// li a1, 0
// c.sw a1, 0(a0)
// c.sw a1, 8(a0)
// c.sw a1, 4(a0)
// c.sw a1, 24(a0)
//
//
// For case 2, if a compressed register is available, then the original base
// is copied and adjusted such that:
//
// new_base_register = base_register + adjustment
// base_register + large_offset = new_base_register + small_offset
//
// For example, the following offsets are too large for c.sw:
// lui a2, 983065
// sw a1, -236(a2)
// sw a1, -240(a2)
// sw a1, -244(a2)
// sw a1, -248(a2)
// sw a1, -252(a2)
// sw a0, -256(a2)
//
// If a compressed register is available (e.g. a3), a new base could be created
// such that the addresses can be accessed with a compressible offset, thus
// improving code size:
// lui a2, 983065
// addi a3, a2, -256
// c.sw a1, 20(a3)
// c.sw a1, 16(a3)
// c.sw a1, 12(a3)
// c.sw a1, 8(a3)
// c.sw a1, 4(a3)
// c.sw a0, 0(a3)
//
//
// This optimization is only applied if there are enough uses of the copied
// register for code size to be reduced.
//
//===----------------------------------------------------------------------===//
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
#define DEBUG_TYPE "riscv-make-compressible"
#define RISCV_COMPRESS_INSTRS_NAME "RISCV Make Compressible"
namespace {
// Pass wrapper: walks each machine function looking for loads/stores that
// miss out on compression and rewrites them to use compressible registers
// and offsets.
struct RISCVMakeCompressibleOpt : public MachineFunctionPass {
  // Pass identification.
  static char ID;

  RISCVMakeCompressibleOpt() : MachineFunctionPass(ID) {
    initializeRISCVMakeCompressibleOptPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &Fn) override;

  StringRef getPassName() const override { return RISCV_COMPRESS_INSTRS_NAME; }
};
} // namespace
char RISCVMakeCompressibleOpt::ID = 0;
INITIALIZE_PASS(RISCVMakeCompressibleOpt, "riscv-make-compressible",
RISCV_COMPRESS_INSTRS_NAME, false, false)
// Return log2(widthInBytes) of load/store done by Opcode.
// Return log2(widthInBytes) of load/store done by Opcode.
static unsigned log2LdstWidth(unsigned Opcode) {
  switch (Opcode) {
  case RISCV::LW:
  case RISCV::SW:
  case RISCV::FLW:
  case RISCV::FSW:
    // 4-byte accesses.
    return 2;
  case RISCV::LD:
  case RISCV::SD:
  case RISCV::FLD:
  case RISCV::FSD:
    // 8-byte accesses.
    return 3;
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
// Return a mask for the offset bits of a non-stack-pointer based compressed
// load/store.
// Return a mask for the offset bits of a non-stack-pointer based compressed
// load/store. Compressed loads/stores encode a 5-bit offset scaled by the
// access width, so the mask is those five bits shifted up by
// log2 of the width.
static uint8_t compressedLDSTOffsetMask(unsigned Opcode) {
  unsigned Scale = log2LdstWidth(Opcode);
  return 0x1f << Scale;
}
// Return true if Offset fits within a compressed stack-pointer based
// load/store.
// Return true if Offset fits within a compressed stack-pointer based
// load/store. SP-relative compressed accesses take a 6-bit unsigned offset
// scaled by the access width.
static bool compressibleSPOffset(int64_t Offset, unsigned Opcode) {
  if (log2LdstWidth(Opcode) == 2)
    return isShiftedUInt<6, 2>(Offset);
  return isShiftedUInt<6, 3>(Offset);
}
// Given an offset for a load/store, return the adjustment required to the base
// register such that the address can be accessed with a compressible offset.
// This will return 0 if the offset is already compressible.
// Given an offset for a load/store, return the adjustment required to the base
// register such that the address can be accessed with a compressible offset.
// This will return 0 if the offset is already compressible.
static int64_t getBaseAdjustForCompression(int64_t Offset, unsigned Opcode) {
  // Everything outside the compressible offset bits must be folded into the
  // base register adjustment.
  int64_t OffsetBits = compressedLDSTOffsetMask(Opcode);
  return Offset & ~OffsetBits;
}
// Return true if Reg is in a compressed register class.
// Return true if Reg is in a compressed register class (the register subsets
// addressable by the RVC encodings).
static bool isCompressedReg(Register Reg) {
  for (const auto *RC : {&RISCV::GPRCRegClass, &RISCV::FPR32CRegClass,
                         &RISCV::FPR64CRegClass})
    if (RC->contains(Reg))
      return true;
  return false;
}
// Return true if MI is a load for which there exists a compressed version.
// Return true if MI is a load for which there exists a compressed version.
static bool isCompressibleLoad(const MachineInstr &MI) {
  const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();

  switch (MI.getOpcode()) {
  case RISCV::FLW:
    // C.FLW is only available on 32-bit targets.
    return !STI.is64Bit();
  case RISCV::LW:
  case RISCV::LD:
  case RISCV::FLD:
    return true;
  default:
    return false;
  }
}
// Return true if MI is a store for which there exists a compressed version.
// Return true if MI is a store for which there exists a compressed version.
static bool isCompressibleStore(const MachineInstr &MI) {
  const RISCVSubtarget &STI = MI.getMF()->getSubtarget<RISCVSubtarget>();

  switch (MI.getOpcode()) {
  case RISCV::FSW:
    // C.FSW is only available on 32-bit targets.
    return !STI.is64Bit();
  case RISCV::SW:
  case RISCV::SD:
  case RISCV::FSD:
    return true;
  default:
    return false;
  }
}
// Find a single register and/or large offset which, if compressible, would
// allow the given instruction to be compressed.
//
// Possible return values:
//
// {Reg, 0} - Uncompressed Reg needs replacing with a compressed
// register.
// {Reg, N} - Reg needs replacing with a compressed register and
// N needs adding to the new register. (Reg may be
// compressed or uncompressed).
// {RISCV::NoRegister, 0} - No suitable optimization found for this
// instruction.
static RegImmPair getRegImmPairPreventingCompression(const MachineInstr &MI) {
  const unsigned Opcode = MI.getOpcode();

  // Only loads/stores with compressed counterparts are candidates; operand
  // layout below (0 = src/dest, 1 = base, 2 = offset) relies on this.
  if (isCompressibleLoad(MI) || isCompressibleStore(MI)) {
    const MachineOperand &MOImm = MI.getOperand(2);
    // A non-immediate offset (e.g. a symbol not yet resolved) cannot be
    // analyzed here.
    if (!MOImm.isImm())
      return RegImmPair(RISCV::NoRegister, 0);

    int64_t Offset = MOImm.getImm();
    // Adjustment needed to bring the offset into compressible range; zero if
    // the offset is already compressible.
    int64_t NewBaseAdjust = getBaseAdjustForCompression(Offset, Opcode);
    Register Base = MI.getOperand(1).getReg();

    // Memory accesses via the stack pointer do not have a requirement for
    // either of the registers to be compressible and can take a larger offset.
    if (RISCV::SPRegClass.contains(Base)) {
      // Only a too-large SP offset can prevent compression here; copying SP
      // into a new (adjusted) base can fix that.
      if (!compressibleSPOffset(Offset, Opcode) && NewBaseAdjust)
        return RegImmPair(Base, NewBaseAdjust);
    } else {
      Register SrcDest = MI.getOperand(0).getReg();
      bool SrcDestCompressed = isCompressedReg(SrcDest);
      bool BaseCompressed = isCompressedReg(Base);

      // If only Base and/or offset prevent compression, then return Base and
      // any adjustment required to make the offset compressible.
      if ((!BaseCompressed || NewBaseAdjust) && SrcDestCompressed)
        return RegImmPair(Base, NewBaseAdjust);

      // For loads, we can only change the base register since dest is defined
      // rather than used.
      //
      // For stores, we can change SrcDest (and Base if SrcDest == Base) but
      // cannot resolve an uncompressible offset in this case.
      if (isCompressibleStore(MI)) {
        if (!SrcDestCompressed && (BaseCompressed || SrcDest == Base) &&
            !NewBaseAdjust)
          return RegImmPair(SrcDest, NewBaseAdjust);
      }
    }
  }
  // No suitable single-register/offset rewrite exists for this instruction.
  return RegImmPair(RISCV::NoRegister, 0);
}
// Check all uses after FirstMI of the given register, keeping a vector of
// instructions that would be compressible if the given register (and offset if
// applicable) were compressible.
//
// If there are enough uses for this optimization to improve code size and a
// compressed register is available, return that compressed register.
static Register analyzeCompressibleUses(MachineInstr &FirstMI,
                                        RegImmPair RegImm,
                                        SmallVectorImpl<MachineInstr *> &MIs) {
  MachineBasicBlock &MBB = *FirstMI.getParent();
  const TargetRegisterInfo *TRI =
      MBB.getParent()->getSubtarget().getRegisterInfo();

  // Track register liveness from the top of the block so a free compressed
  // register can be scavenged over the range of interest.
  RegScavenger RS;
  RS.enterBasicBlock(MBB);

  for (MachineBasicBlock::instr_iterator I = FirstMI.getIterator(),
                                         E = MBB.instr_end();
       I != E; ++I) {
    MachineInstr &MI = *I;

    // Determine if this is an instruction which would benefit from using the
    // new register.
    RegImmPair CandidateRegImm = getRegImmPairPreventingCompression(MI);
    if (CandidateRegImm.Reg == RegImm.Reg &&
        CandidateRegImm.Imm == RegImm.Imm) {
      // Advance tracking since the value in the new register must be live for
      // this instruction too.
      RS.forward(I);

      MIs.push_back(&MI);
    }

    // If RegImm.Reg is modified by this instruction, then we cannot optimize
    // past this instruction. If the register is already compressed, then it may
    // be possible to optimize a large offset in the current instruction - this
    // will have been detected by the preceding call to
    // getRegImmPairPreventingCompression.
    if (MI.modifiesRegister(RegImm.Reg, TRI))
      break;
  }

  // Adjusting the base costs one new uncompressed addi and therefore three uses
  // are required for a code size reduction. If no base adjustment is required,
  // then copying the register costs one new c.mv (or c.li Rd, 0 for "copying"
  // the zero register) and therefore two uses are required for a code size
  // reduction.
  if (MIs.size() < 2 || (RegImm.Imm != 0 && MIs.size() < 3))
    return RISCV::NoRegister;

  // Find a compressible register which will be available from the first
  // instruction we care about to the last.
  const TargetRegisterClass *RCToScavenge;

  // Work out the compressed register class from which to scavenge.
  if (RISCV::GPRRegClass.contains(RegImm.Reg))
    RCToScavenge = &RISCV::GPRCRegClass;
  else if (RISCV::FPR32RegClass.contains(RegImm.Reg))
    RCToScavenge = &RISCV::FPR32CRegClass;
  else if (RISCV::FPR64RegClass.contains(RegImm.Reg))
    RCToScavenge = &RISCV::FPR64CRegClass;
  else
    return RISCV::NoRegister;

  // Returns NoRegister if nothing suitable is free (spilling is not allowed
  // here - this is purely an opportunistic size optimization).
  return RS.scavengeRegisterBackwards(*RCToScavenge, FirstMI.getIterator(),
                                      /*RestoreAfter=*/false, /*SPAdj=*/0,
                                      /*AllowSpill=*/false);
}
// Update uses of the old register in the given instruction to the new register.
// Update uses of the old register in the given instruction to the new register,
// and clamp the offset to its compressible bits (the stripped bits have been
// folded into the new base by the caller's ADDI).
static void updateOperands(MachineInstr &MI, RegImmPair OldRegImm,
                           Register NewReg) {
  unsigned Opcode = MI.getOpcode();

  // If this pass is extended to support more instructions, the check for
  // definedness may need to be strengthened.
  assert((isCompressibleLoad(MI) || isCompressibleStore(MI)) &&
         "Unsupported instruction for this optimization.");

  // Update registers
  for (MachineOperand &MO : MI.operands())
    if (MO.isReg() && MO.getReg() == OldRegImm.Reg) {
      // Do not update operands that define the old register.
      //
      // The new register was scavenged for the range of instructions that are
      // being updated, therefore it should not be defined within this range
      // except possibly in the final instruction.
      if (MO.isDef()) {
        assert(isCompressibleLoad(MI));
        continue;
      }
      // Update reg
      MO.setReg(NewReg);
    }

  // Update offset. When OldRegImm.Imm == 0 the offset is already compressible
  // and the mask leaves it unchanged.
  MachineOperand &MOImm = MI.getOperand(2);
  int64_t NewOffset = MOImm.getImm() & compressedLDSTOffsetMask(Opcode);
  MOImm.setImm(NewOffset);
}
bool RISCVMakeCompressibleOpt::runOnMachineFunction(MachineFunction &Fn) {
  // This is a size optimization: only run on functions marked minsize.
  if (skipFunction(Fn.getFunction()) || !Fn.getFunction().hasMinSize())
    return false;

  const RISCVSubtarget &STI = Fn.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo &TII = *STI.getInstrInfo();

  // This optimization only makes sense if compressed instructions are emitted.
  if (!STI.hasStdExtC())
    return false;

  for (MachineBasicBlock &MBB : Fn) {
    LLVM_DEBUG(dbgs() << "MBB: " << MBB.getName() << "\n");
    for (MachineInstr &MI : MBB) {
      // Determine if this instruction would otherwise be compressed if not for
      // an uncompressible register or offset.
      RegImmPair RegImm = getRegImmPairPreventingCompression(MI);
      if (!RegImm.Reg && RegImm.Imm == 0)
        continue;

      // Determine if there is a set of instructions for which replacing this
      // register with a compressed register (and compressible offset if
      // applicable) is possible and will allow compression.
      SmallVector<MachineInstr *, 8> MIs;
      Register NewReg = analyzeCompressibleUses(MI, RegImm, MIs);
      if (!NewReg)
        continue;

      // Create the appropriate copy and/or offset.
      if (RISCV::GPRRegClass.contains(RegImm.Reg)) {
        // GPR copy-with-adjustment: NewReg = RegImm.Reg + RegImm.Imm.
        assert(isInt<12>(RegImm.Imm));
        BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(RISCV::ADDI), NewReg)
            .addReg(RegImm.Reg)
            .addImm(RegImm.Imm);
      } else {
        // If we are looking at replacing an FPR register we don't expect to
        // have any offset. The only compressible FP instructions with an offset
        // are loads and stores, for which the offset applies to the GPR operand
        // not the FPR operand.
        assert(RegImm.Imm == 0);
        // FSGNJ with identical source operands is the canonical FP move.
        unsigned Opcode = RISCV::FPR32RegClass.contains(RegImm.Reg)
                              ? RISCV::FSGNJ_S
                              : RISCV::FSGNJ_D;
        BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(Opcode), NewReg)
            .addReg(RegImm.Reg)
            .addReg(RegImm.Reg);
      }

      // Update the set of instructions to use the compressed register and
      // compressible offset instead. These instructions should now be
      // compressible.
      // TODO: Update all uses if RegImm.Imm == 0? Not just those that are
      // expected to become compressible.
      for (MachineInstr *UpdateMI : MIs)
        updateOperands(*UpdateMI, RegImm, NewReg);
    }
  }
  // NOTE(review): reports "modified" unconditionally once the early-exits are
  // passed, even if no candidate was rewritten - confirm this conservatism is
  // intended.
  return true;
}
/// Returns an instance of the Make Compressible Optimization pass.
/// Factory function returning a fresh instance of the Make Compressible
/// Optimization pass; ownership passes to the caller (the pass manager).
FunctionPass *llvm::createRISCVMakeCompressibleOptPass() {
  auto *Pass = new RISCVMakeCompressibleOpt();
  return Pass;
}

View File

@@ -46,6 +46,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
auto *PR = PassRegistry::getPassRegistry();
initializeGlobalISel(*PR);
initializeRISCVMakeCompressibleOptPass(*PR);
initializeRISCVGatherScatterLoweringPass(*PR);
initializeRISCVMergeBaseOffsetOptPass(*PR);
initializeRISCVSExtWRemovalPass(*PR);
@@ -208,7 +209,10 @@ bool RISCVPassConfig::addGlobalInstructionSelect() {
void RISCVPassConfig::addPreSched2() {}
void RISCVPassConfig::addPreEmitPass() { addPass(&BranchRelaxationPassID); }
void RISCVPassConfig::addPreEmitPass() {
addPass(&BranchRelaxationPassID);
addPass(createRISCVMakeCompressibleOptPass());
}
void RISCVPassConfig::addPreEmitPass2() {
addPass(createRISCVExpandPseudoPass());

View File

@@ -60,6 +60,7 @@
; CHECK-NEXT: Insert XRay ops
; CHECK-NEXT: Implement the 'patchable-function' attribute
; CHECK-NEXT: Branch relaxation pass
; CHECK-NEXT: RISCV Make Compressible
; CHECK-NEXT: Contiguously Lay Out Funclets
; CHECK-NEXT: StackMap Liveness Analysis
; CHECK-NEXT: Live DEBUG_VALUE analysis

View File

@@ -151,6 +151,7 @@
; CHECK-NEXT: Insert XRay ops
; CHECK-NEXT: Implement the 'patchable-function' attribute
; CHECK-NEXT: Branch relaxation pass
; CHECK-NEXT: RISCV Make Compressible
; CHECK-NEXT: Contiguously Lay Out Funclets
; CHECK-NEXT: StackMap Liveness Analysis
; CHECK-NEXT: Live DEBUG_VALUE analysis

View File

@@ -0,0 +1,341 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -o - %s -mtriple=riscv64 -mattr=+c -simplify-mir \
# RUN: -run-pass=riscv-make-compressible | FileCheck %s
--- |
define void @store_common_value(i64* %a, i64* %b, i64* %c) #0 {
entry:
store i64 0, i64* %a, align 8
store i64 0, i64* %b, align 8
store i64 0, i64* %c, align 8
ret void
}
define void @store_common_ptr(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
entry:
store volatile i64 1, i64* %p, align 8
store volatile i64 3, i64* %p, align 8
store volatile i64 5, i64* %p, align 8
ret void
}
define void @store_common_ptr_self(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
entry:
%q = bitcast i64* %p to i64**
store volatile i64 1, i64* %p, align 8
store volatile i64 3, i64* %p, align 8
store volatile i64* %p, i64** %q, align 8
ret void
}
define void @load_common_ptr(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
entry:
%g = load volatile i64, i64* %p, align 8
%h = load volatile i64, i64* %p, align 8
%i = load volatile i64, i64* %p, align 8
ret void
}
define void @store_large_offset(i64* %p) #0 {
entry:
%0 = getelementptr inbounds i64, i64* %p, i64 100
store volatile i64 1, i64* %0, align 8
%1 = getelementptr inbounds i64, i64* %p, i64 101
store volatile i64 3, i64* %1, align 8
%2 = getelementptr inbounds i64, i64* %p, i64 102
store volatile i64 5, i64* %2, align 8
%3 = getelementptr inbounds i64, i64* %p, i64 103
store volatile i64 7, i64* %3, align 8
ret void
}
define void @load_large_offset(i64* %p) #0 {
entry:
%0 = getelementptr inbounds i64, i64* %p, i64 100
%a = load volatile i64, i64* %0, align 8
%1 = getelementptr inbounds i64, i64* %p, i64 101
%b = load volatile i64, i64* %1, align 8
%2 = getelementptr inbounds i64, i64* %p, i64 102
%c = load volatile i64, i64* %2, align 8
%3 = getelementptr inbounds i64, i64* %p, i64 103
%d = load volatile i64, i64* %3, align 8
ret void
}
define void @store_common_value_no_opt(i64* %a) #0 {
entry:
store i64 0, i64* %a, align 8
ret void
}
define void @store_common_ptr_no_opt(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
entry:
store volatile i64 1, i64* %p, align 8
ret void
}
define void @load_common_ptr_no_opt(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64* %p) #0 {
entry:
%g = load volatile i64, i64* %p, align 8
ret void
}
define void @store_large_offset_no_opt(i64* %p) #0 {
entry:
%0 = getelementptr inbounds i64, i64* %p, i64 100
store volatile i64 1, i64* %0, align 8
%1 = getelementptr inbounds i64, i64* %p, i64 101
store volatile i64 3, i64* %1, align 8
ret void
}
define void @load_large_offset_no_opt(i64* %p) #0 {
entry:
%0 = getelementptr inbounds i64, i64* %p, i64 100
%a = load volatile i64, i64* %0, align 8
%1 = getelementptr inbounds i64, i64* %p, i64 101
%b = load volatile i64, i64* %1, align 8
ret void
}
attributes #0 = { minsize "target-features"="+c" }
...
---
name: store_common_value
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x10, $x11, $x12
; CHECK-LABEL: name: store_common_value
; CHECK: liveins: $x10, $x11, $x12
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x13 = ADDI $x0, 0
; CHECK-NEXT: SD $x13, killed renamable $x10, 0 :: (store (s64) into %ir.a)
; CHECK-NEXT: SD $x13, killed renamable $x11, 0 :: (store (s64) into %ir.b)
; CHECK-NEXT: SD $x13, killed renamable $x12, 0 :: (store (s64) into %ir.c)
; CHECK-NEXT: PseudoRET
SD $x0, killed renamable $x10, 0 :: (store (s64) into %ir.a)
SD $x0, killed renamable $x11, 0 :: (store (s64) into %ir.b)
SD $x0, killed renamable $x12, 0 :: (store (s64) into %ir.c)
PseudoRET
...
---
name: store_common_ptr
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x16
; CHECK-LABEL: name: store_common_ptr
; CHECK: liveins: $x16
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
; CHECK-NEXT: $x11 = ADDI $x16, 0
; CHECK-NEXT: SD killed renamable $x10, $x11, 0 :: (volatile store (s64) into %ir.p)
; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
; CHECK-NEXT: SD killed renamable $x10, $x11, 0 :: (volatile store (s64) into %ir.p)
; CHECK-NEXT: renamable $x10 = ADDI $x0, 5
; CHECK-NEXT: SD killed renamable $x10, killed $x11, 0 :: (volatile store (s64) into %ir.p)
; CHECK-NEXT: PseudoRET
renamable $x10 = ADDI $x0, 1
SD killed renamable $x10, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
renamable $x10 = ADDI $x0, 3
SD killed renamable $x10, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
renamable $x10 = ADDI $x0, 5
SD killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
PseudoRET
...
---
name: store_common_ptr_self
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x16
; CHECK-LABEL: name: store_common_ptr_self
; CHECK: liveins: $x16
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
; CHECK-NEXT: $x11 = ADDI $x16, 0
; CHECK-NEXT: SD killed renamable $x10, $x11, 0 :: (volatile store (s64) into %ir.p)
; CHECK-NEXT: renamable $x10 = ADDI $x0, 3
; CHECK-NEXT: SD killed renamable $x10, $x11, 0 :: (volatile store (s64) into %ir.p)
; CHECK-NEXT: SD killed $x11, $x11, 0 :: (volatile store (s64) into %ir.q)
; CHECK-NEXT: PseudoRET
renamable $x10 = ADDI $x0, 1
SD killed renamable $x10, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
renamable $x10 = ADDI $x0, 3
SD killed renamable $x10, renamable $x16, 0 :: (volatile store (s64) into %ir.p)
SD killed renamable $x16, renamable $x16, 0 :: (volatile store (s64) into %ir.q)
PseudoRET
...
---
name: load_common_ptr
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x16
; CHECK-LABEL: name: load_common_ptr
; CHECK: liveins: $x16
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x11 = ADDI $x16, 0
; CHECK-NEXT: dead renamable $x10 = LD $x11, 0 :: (volatile load (s64) from %ir.p)
; CHECK-NEXT: dead renamable $x10 = LD $x11, 0 :: (volatile load (s64) from %ir.p)
; CHECK-NEXT: dead renamable $x10 = LD killed $x11, 0 :: (volatile load (s64) from %ir.p)
; CHECK-NEXT: PseudoRET
dead renamable $x10 = LD renamable $x16, 0 :: (volatile load (s64) from %ir.p)
dead renamable $x10 = LD renamable $x16, 0 :: (volatile load (s64) from %ir.p)
dead renamable $x10 = LD killed renamable $x16, 0 :: (volatile load (s64) from %ir.p)
PseudoRET
...
---
name: store_large_offset
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x10
; CHECK-LABEL: name: store_large_offset
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
; CHECK-NEXT: $x12 = ADDI $x10, 768
; CHECK-NEXT: SD killed renamable $x11, $x12, 32 :: (volatile store (s64) into %ir.0)
; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
; CHECK-NEXT: SD killed renamable $x11, $x12, 40 :: (volatile store (s64) into %ir.1)
; CHECK-NEXT: renamable $x11 = ADDI $x0, 5
; CHECK-NEXT: SD killed renamable $x11, $x12, 48 :: (volatile store (s64) into %ir.2)
; CHECK-NEXT: renamable $x11 = ADDI $x0, 7
; CHECK-NEXT: SD killed renamable $x11, killed $x12, 56 :: (volatile store (s64) into %ir.3)
; CHECK-NEXT: PseudoRET
renamable $x11 = ADDI $x0, 1
SD killed renamable $x11, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
renamable $x11 = ADDI $x0, 3
SD killed renamable $x11, renamable $x10, 808 :: (volatile store (s64) into %ir.1)
renamable $x11 = ADDI $x0, 5
SD killed renamable $x11, renamable $x10, 816 :: (volatile store (s64) into %ir.2)
renamable $x11 = ADDI $x0, 7
SD killed renamable $x11, killed renamable $x10, 824 :: (volatile store (s64) into %ir.3)
PseudoRET
...
---
name: load_large_offset
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x10
; CHECK-LABEL: name: load_large_offset
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x12 = ADDI $x10, 768
; CHECK-NEXT: dead renamable $x11 = LD $x12, 32 :: (volatile load (s64) from %ir.0)
; CHECK-NEXT: dead renamable $x11 = LD $x12, 40 :: (volatile load (s64) from %ir.1)
; CHECK-NEXT: dead renamable $x11 = LD $x12, 48 :: (volatile load (s64) from %ir.2)
; CHECK-NEXT: dead renamable $x10 = LD killed $x12, 56 :: (volatile load (s64) from %ir.3)
; CHECK-NEXT: PseudoRET
dead renamable $x11 = LD renamable $x10, 800 :: (volatile load (s64) from %ir.0)
dead renamable $x11 = LD renamable $x10, 808 :: (volatile load (s64) from %ir.1)
dead renamable $x11 = LD renamable $x10, 816 :: (volatile load (s64) from %ir.2)
dead renamable $x10 = LD killed renamable $x10, 824 :: (volatile load (s64) from %ir.3)
PseudoRET
...
---
name: store_common_value_no_opt
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x10
; CHECK-LABEL: name: store_common_value_no_opt
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: SD $x0, killed renamable $x10, 0 :: (store (s64) into %ir.a)
; CHECK-NEXT: PseudoRET
SD $x0, killed renamable $x10, 0 :: (store (s64) into %ir.a)
PseudoRET
...
---
name: store_common_ptr_no_opt
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x16
; CHECK-LABEL: name: store_common_ptr_no_opt
; CHECK: liveins: $x16
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $x10 = ADDI $x0, 1
; CHECK-NEXT: SD killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
; CHECK-NEXT: PseudoRET
renamable $x10 = ADDI $x0, 1
SD killed renamable $x10, killed renamable $x16, 0 :: (volatile store (s64) into %ir.p)
PseudoRET
...
---
name: load_common_ptr_no_opt
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x16
; CHECK-LABEL: name: load_common_ptr_no_opt
; CHECK: liveins: $x16
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: dead renamable $x10 = LD killed renamable $x16, 0 :: (volatile load (s64) from %ir.p)
; CHECK-NEXT: PseudoRET
dead renamable $x10 = LD killed renamable $x16, 0 :: (volatile load (s64) from %ir.p)
PseudoRET
...
---
name: store_large_offset_no_opt
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x10
; CHECK-LABEL: name: store_large_offset_no_opt
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: renamable $x11 = ADDI $x0, 1
; CHECK-NEXT: SD killed renamable $x11, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
; CHECK-NEXT: renamable $x11 = ADDI $x0, 3
; CHECK-NEXT: SD killed renamable $x11, killed renamable $x10, 808 :: (volatile store (s64) into %ir.1)
; CHECK-NEXT: PseudoRET
renamable $x11 = ADDI $x0, 1
SD killed renamable $x11, renamable $x10, 800 :: (volatile store (s64) into %ir.0)
renamable $x11 = ADDI $x0, 3
SD killed renamable $x11, killed renamable $x10, 808 :: (volatile store (s64) into %ir.1)
PseudoRET
...
---
name: load_large_offset_no_opt
tracksRegLiveness: true
body: |
bb.0.entry:
liveins: $x10
; CHECK-LABEL: name: load_large_offset_no_opt
; CHECK: liveins: $x10
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: dead renamable $x11 = LD renamable $x10, 800 :: (volatile load (s64) from %ir.0)
; CHECK-NEXT: dead renamable $x10 = LD killed renamable $x10, 808 :: (volatile load (s64) from %ir.1)
; CHECK-NEXT: PseudoRET
dead renamable $x11 = LD renamable $x10, 800 :: (volatile load (s64) from %ir.0)
dead renamable $x10 = LD killed renamable $x10, 808 :: (volatile load (s64) from %ir.1)
PseudoRET
...

File diff suppressed because it is too large Load Diff