diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index d7569ab0ea59..b1491b75ac5b 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -429,6 +429,26 @@ bool LoongArchAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
   return true;
 }
 
+bool LoongArchAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
+                                               const MCFragment &F) {
+  // If the section does not contain linker-relaxable fragments, PC-relative
+  // fixups can be resolved.
+  if (!F.getParent()->isLinkerRelaxable())
+    return true;
+
+  // Otherwise, check if the offset between the symbol and fragment is fully
+  // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
+  // offset-affected MCAlignFragment). Complements the generic
+  // isSymbolRefDifferenceFullyResolvedImpl.
+  if (!PCRelTemp)
+    PCRelTemp = getContext().createTempSymbol();
+  PCRelTemp->setFragment(const_cast<MCFragment *>(&F));
+  MCValue Res;
+  MCExpr::evaluateSymbolicAdd(Asm, false, MCValue::get(SymA),
+                              MCValue::get(nullptr, PCRelTemp), Res);
+  return !Res.getSubSym();
+}
+
 bool LoongArchAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
                                    const MCValue &Target, uint64_t &FixedValue,
                                    bool IsResolved) {
@@ -447,19 +467,24 @@ bool LoongArchAsmBackend::addReloc(const MCFragment &F, const MCFixup &Fixup,
   if (!force) {
     const MCSection &SecA = SA.getSection();
     const MCSection &SecB = SB.getSection();
+    const MCSection &SecCur = *F.getParent();
 
-    // We need record relocation if SecA != SecB. Usually SecB is same as the
-    // section of Fixup, which will be record the relocation as PCRel. If SecB
-    // is not same as the section of Fixup, it will report error. Just return
-    // false and then this work can be finished by handleFixup.
-    if (&SecA != &SecB)
+    // To handle the case of A - B where B is in the same section as the
+    // current fixup, generating a PCRel relocation is better than an ADD/SUB
+    // relocation pair: we can resolve it as (A - PC) + (PC - B). The A - PC
+    // part becomes a PCRel relocation, while PC - B serves as the addend.
+    // If linker relaxation is disabled, this can be done directly since
+    // PC - B is a constant. Otherwise, we must check whether PC - B is
+    // indeed constant. If so, resolve it as PCRel via Fallback, which
+    // emits an R_LARCH_{32,64}_PCREL relocation later.
+    if (&SecA != &SecB && &SecB == &SecCur &&
+        isPCRelFixupResolved(Target.getSubSym(), F))
       return Fallback();
 
-    // In SecA == SecB case. If the linker relaxation is enabled, we need
-    // record the ADD, SUB relocations. Otherwise the FixedValue has already
-    // been calc- ulated out in evaluateFixup, return true and avoid record
-    // relocations.
-    if (!STI.hasFeature(LoongArch::FeatureRelax))
+    // In the SecA == SecB case, if linker relaxation is disabled, the
+    // FixedValue has already been calculated in evaluateFixup; return
+    // true to avoid recording relocations.
+    if (&SecA == &SecB && !STI.hasFeature(LoongArch::FeatureRelax))
       return true;
   }
 
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
index aeedafe2b44b..56554c5c664e 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
@@ -30,6 +30,10 @@ class LoongArchAsmBackend : public MCAsmBackend {
   bool Is64Bit;
   const MCTargetOptions &TargetOptions;
   DenseMap<MCSection *, MCSymbol *> SecToAlignSym;
+  // Temporary symbol used to check whether a PC-relative fixup is resolved.
+  MCSymbol *PCRelTemp = nullptr;
+
+  bool isPCRelFixupResolved(const MCSymbol *SymA, const MCFragment &F);
 
 public:
   LoongArchAsmBackend(const MCSubtargetInfo &STI, uint8_t OSABI, bool Is64Bit,
diff --git a/llvm/test/MC/LoongArch/Misc/cfi-advance.s b/llvm/test/MC/LoongArch/Misc/cfi-advance.s
index 662c43e6bcea..38eba7caf610 100644
--- a/llvm/test/MC/LoongArch/Misc/cfi-advance.s
+++ b/llvm/test/MC/LoongArch/Misc/cfi-advance.s
@@ -1,6 +1,8 @@
 # RUN: llvm-mc --filetype=obj --triple=loongarch64 -mattr=-relax %s -o %t.o
 # RUN: llvm-readobj -r %t.o | FileCheck --check-prefix=RELOC %s
 # RUN: llvm-dwarfdump --debug-frame %t.o | FileCheck --check-prefix=DWARFDUMP %s
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 -mattr=+relax %s \
+# RUN: | llvm-readobj -r - | FileCheck --check-prefix=RELAX %s
 
 # RELOC: Relocations [
 # RELOC-NEXT: .rela.eh_frame {
@@ -12,6 +14,16 @@
 # DWARFDUMP-NEXT: DW_CFA_advance_loc: 8
 # DWARFDUMP-NEXT: DW_CFA_def_cfa_offset: +8
 
+# RELAX: Relocations [
+# RELAX: .rela.eh_frame {
+# RELAX-NEXT: 0x1C R_LARCH_32_PCREL .L{{.*}} 0x0
+# RELAX-NEXT: 0x20 R_LARCH_ADD32 .L{{.*}} 0x0
+# RELAX-NEXT: 0x20 R_LARCH_SUB32 .L{{.*}} 0x0
+# RELAX-NEXT: 0x28 R_LARCH_ADD6 .L{{.*}} 0x0
+# RELAX-NEXT: 0x28 R_LARCH_SUB6 .L{{.*}} 0x0
+# RELAX-NEXT: }
+# RELAX-NEXT: ]
+
 .text
 .globl test
 .p2align 2
diff --git a/llvm/test/MC/LoongArch/Relocations/fde-reloc.s b/llvm/test/MC/LoongArch/Relocations/fde-reloc.s
index 990e07c7f00b..ab911d1853a8 100644
--- a/llvm/test/MC/LoongArch/Relocations/fde-reloc.s
+++ b/llvm/test/MC/LoongArch/Relocations/fde-reloc.s
@@ -1,5 +1,7 @@
-# RUN: llvm-mc --filetype=obj --triple=loongarch64 < %s \
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 --mattr=-relax < %s \
 # RUN: | llvm-readobj -r - | FileCheck %s
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 --mattr=+relax < %s \
+# RUN: | llvm-readobj -r - | FileCheck %s --check-prefix=RELAX
 
 ## Ensure that the eh_frame records the symbolic difference with
 ## the R_LARCH_32_PCREL relocation.
@@ -12,3 +14,6 @@ func:
 # CHECK: Section (4) .rela.eh_frame {
 # CHECK-NEXT: 0x1C R_LARCH_32_PCREL .text 0x0
 # CHECK-NEXT: }
+# RELAX: Section ({{.*}}) .rela.eh_frame {
+# RELAX-NEXT: 0x1C R_LARCH_32_PCREL .L{{.*}} 0x0
+# RELAX-NEXT: }
diff --git a/llvm/test/MC/LoongArch/Relocations/sub-expr.s b/llvm/test/MC/LoongArch/Relocations/sub-expr.s
index 0179e1027af8..8bf046acc697 100644
--- a/llvm/test/MC/LoongArch/Relocations/sub-expr.s
+++ b/llvm/test/MC/LoongArch/Relocations/sub-expr.s
@@ -1,28 +1,95 @@
-# RUN: llvm-mc --filetype=obj --triple=loongarch64 %s -o %t
-# RUN: llvm-readobj -r %t | FileCheck %s
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 --mattr=-relax %s \
+# RUN: | llvm-readobj -r - | FileCheck %s
+# RUN: llvm-mc --filetype=obj --triple=loongarch64 --mattr=+relax %s \
+# RUN: | llvm-readobj -r - | FileCheck %s --check-prefix=RELAX
 
 ## Check that subtraction expressions emit R_LARCH_32_PCREL and R_LARCH_64_PCREL relocations.
 
 ## TODO: 1- or 2-byte data relocations are not supported for now.
 
 # CHECK: Relocations [
-# CHECK-NEXT: Section ({{.*}}) .rela.data {
-# CHECK-NEXT: 0x0 R_LARCH_64_PCREL sx 0x0
-# CHECK-NEXT: 0x8 R_LARCH_64_PCREL sy 0x0
-# CHECK-NEXT: 0x10 R_LARCH_32_PCREL sx 0x0
-# CHECK-NEXT: 0x14 R_LARCH_32_PCREL sy 0x0
-# CHECK-NEXT: }
+# CHECK-NEXT: Section ({{.*}}) .rela.sx {
+# CHECK-NEXT: 0x4 R_LARCH_PCALA_HI20 z 0x0
+# CHECK-NEXT: 0x8 R_LARCH_PCALA_LO12 z 0x0
+# CHECK-NEXT: 0xC R_LARCH_32_PCREL .sy 0xC
+# CHECK-NEXT: }
+# CHECK-NEXT: Section ({{.*}}) .rela.data {
+# CHECK-NEXT: 0x0 R_LARCH_64_PCREL .sx 0x4
+# CHECK-NEXT: 0x8 R_LARCH_64_PCREL .sy 0x4
+# CHECK-NEXT: 0x10 R_LARCH_32_PCREL .sx 0x4
+# CHECK-NEXT: 0x14 R_LARCH_32_PCREL .sy 0x4
+# CHECK-NEXT: 0x18 R_LARCH_ADD64 .sx 0x4
+# CHECK-NEXT: 0x18 R_LARCH_SUB64 .sy 0x4
+# CHECK-NEXT: 0x20 R_LARCH_ADD64 .sy 0x4
+# CHECK-NEXT: 0x20 R_LARCH_SUB64 .sx 0x4
+# CHECK-NEXT: 0x28 R_LARCH_ADD32 .sx 0x4
+# CHECK-NEXT: 0x28 R_LARCH_SUB32 .sy 0x4
+# CHECK-NEXT: 0x2C R_LARCH_ADD32 .sy 0x4
+# CHECK-NEXT: 0x2C R_LARCH_SUB32 .sx 0x4
+# CHECK-NEXT: 0x30 R_LARCH_ADD64 .data 0x30
+# CHECK-NEXT: 0x30 R_LARCH_SUB64 .sx 0x4
+# CHECK-NEXT: 0x38 R_LARCH_ADD32 .data 0x38
+# CHECK-NEXT: 0x38 R_LARCH_SUB32 .sy 0x4
+# CHECK-NEXT: }
+# CHECK-NEXT: Section ({{.*}}) .rela.sy {
+# CHECK-NEXT: 0x10 R_LARCH_32_PCREL .sx 0x10
+# CHECK-NEXT: }
+# CHECK-NEXT: ]
 
-.section sx,"a"
-x:
+# RELAX: Relocations [
+# RELAX-NEXT: Section ({{.*}}) .rela.sx {
+# RELAX-NEXT: 0x4 R_LARCH_PCALA_HI20 z 0x0
+# RELAX-NEXT: 0x4 R_LARCH_RELAX - 0x0
+# RELAX-NEXT: 0x8 R_LARCH_PCALA_LO12 z 0x0
+# RELAX-NEXT: 0x8 R_LARCH_RELAX - 0x0
+# RELAX-NEXT: 0xC R_LARCH_ADD32 y 0x0
+# RELAX-NEXT: 0xC R_LARCH_SUB32 x 0x0
+# RELAX-NEXT: }
+# RELAX-NEXT: Section ({{.*}}) .rela.data {
+# RELAX-NEXT: 0x0 R_LARCH_64_PCREL x 0x0
+# RELAX-NEXT: 0x8 R_LARCH_64_PCREL y 0x0
+# RELAX-NEXT: 0x10 R_LARCH_32_PCREL x 0x0
+# RELAX-NEXT: 0x14 R_LARCH_32_PCREL y 0x0
+# RELAX-NEXT: 0x18 R_LARCH_ADD64 x 0x0
+# RELAX-NEXT: 0x18 R_LARCH_SUB64 y 0x0
+# RELAX-NEXT: 0x20 R_LARCH_ADD64 y 0x0
+# RELAX-NEXT: 0x20 R_LARCH_SUB64 x 0x0
+# RELAX-NEXT: 0x28 R_LARCH_ADD32 x 0x0
+# RELAX-NEXT: 0x28 R_LARCH_SUB32 y 0x0
+# RELAX-NEXT: 0x2C R_LARCH_ADD32 y 0x0
+# RELAX-NEXT: 0x2C R_LARCH_SUB32 x 0x0
+# RELAX-NEXT: 0x30 R_LARCH_ADD64 {{.*}} 0x0
+# RELAX-NEXT: 0x30 R_LARCH_SUB64 x 0x0
+# RELAX-NEXT: 0x38 R_LARCH_ADD32 {{.*}} 0x0
+# RELAX-NEXT: 0x38 R_LARCH_SUB32 y 0x0
+# RELAX-NEXT: }
+# RELAX-NEXT: Section ({{.*}}) .rela.sy {
+# RELAX-NEXT: 0x4 R_LARCH_ALIGN - 0xC
+# RELAX-NEXT: 0x10 R_LARCH_ADD32 x 0x0
+# RELAX-NEXT: 0x10 R_LARCH_SUB32 y 0x0
+# RELAX-NEXT: }
+# RELAX-NEXT: ]
+
+.section .sx,"ax"
 nop
+x:
+la.pcrel $a0, z
+.4byte y-x
 
 .data
 .8byte x-.
 .8byte y-.
 .4byte x-.
 .4byte y-.
+.8byte x-y
+.8byte y-x
+.4byte x-y
+.4byte y-x
+.8byte .-x
+.4byte .-y
 
-.section sy,"a"
-y:
+.section .sy,"ax"
 nop
+y:
+.p2align 4
+.4byte x-y
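
A minimal sketch of the decomposition the new addReloc comment describes; it is illustrative only and not part of the patch, and the labels a/b and the section layout are made up for the example. The word holding a - b lives in the same section as b, so the assembler can rewrite the difference as (a - PC) + (PC - b): the a - PC part becomes a PC-relative relocation and PC - b folds into the addend, provided no linker-relaxable fragment sits between the fixup and b (which isPCRelFixupResolved checks when relaxation is enabled).

  .text
  a:
    nop

  .data
  b:
    # Fixup location equals b, so PC - b is a known constant (0 here) and the
    # assembler can emit a single R_LARCH_32_PCREL against a instead of an
    # R_LARCH_ADD32/R_LARCH_SUB32 pair.
    .4byte a - b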