clang-p2996/llvm/test/CodeGen/RISCV/stack-store-check.ll
Philip Reames 859c871184 [RISCV] Default to MicroOpBufferSize = 1 for scheduling purposes (#126608)
This change introduces a default schedule model for the RISCV target
which leaves everything unchanged except the MicroOpBufferSize. The
default value of this flag in NoSched is 0. Both configurations
represent in-order cores (i.e. no reorder window); the difference
between them comes down to whether heuristics other than latency are
allowed to apply. (Implementation details below.)
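
To make the shape of the change concrete, here is a purely illustrative
C++ sketch. MiniSchedModel and every field other than MicroOpBufferSize
are placeholders invented for this example - this is not the TableGen
model or the generated subtarget code - but it shows a configuration
that differs from a NoSched-style model in exactly one value:

// Hypothetical mini-model; the field names mimic a machine scheduling
// model, but the values other than MicroOpBufferSize are placeholders.
struct MiniSchedModel {
  unsigned IssueWidth;
  unsigned MicroOpBufferSize;
  bool CompleteModel;
};

// A NoSched-style configuration: MicroOpBufferSize is 0 (strictly
// in-order, latency-only scheduling).
constexpr MiniSchedModel NoSchedLike{/*IssueWidth=*/1,
                                     /*MicroOpBufferSize=*/0,
                                     /*CompleteModel=*/false};

// The RISC-V default introduced by this patch: identical except that
// MicroOpBufferSize is 1, which still models an in-order core but lets
// heuristics other than latency participate in scheduling decisions.
constexpr MiniSchedModel RISCVDefaultLike{/*IssueWidth=*/1,
                                          /*MicroOpBufferSize=*/1,
                                          /*CompleteModel=*/false};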

I left the processor models which explicitly set MicroOpBufferSize=0
unchanged in this patch, but strongly suspect we should change those
too. Honestly, I think the LLVM-wide default for this flag should be
changed, but don't have the energy to manage the updates for all
targets.

Implementation-wise, the effect of this change is that schedule units
which are ready to run *except that* one of their predecessors may not
have completed yet are added to the Available list, not the Pending one.
The result of this is that it becomes possible to choose to schedule a
node before its ready cycle if the heuristics prefer. This is
essentially choosing to insert a resource stall instead of, e.g.,
increasing register pressure.
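
As a minimal sketch of that Available-vs-Pending decision (illustrative
only: FakeSU and releaseNodeSketch are invented names, not the actual
SchedBoundary code; the only assumption is that MicroOpBufferSize is
the knob being consulted):

#include <vector>

// Illustrative stand-in for a scheduling unit; not LLVM's SUnit.
struct FakeSU {
  unsigned ReadyCycle; // first cycle at which all predecessors have completed
};

// Sketch of the release decision described above. With
// MicroOpBufferSize == 0, a unit whose ReadyCycle is still in the future
// is parked on Pending and only becomes a candidate once its latency has
// elapsed. With MicroOpBufferSize >= 1 it goes straight to Available, so
// heuristics such as register pressure may pick it early at the cost of
// a resource stall.
void releaseNodeSketch(const FakeSU &SU, unsigned CurrCycle,
                       unsigned MicroOpBufferSize,
                       std::vector<FakeSU> &Available,
                       std::vector<FakeSU> &Pending) {
  const bool Buffered = MicroOpBufferSize != 0;
  if (!Buffered && SU.ReadyCycle > CurrCycle)
    Pending.push_back(SU);   // latency is paramount; wait for ReadyCycle
  else
    Available.push_back(SU); // candidate now; may be picked before ReadyCycle
}

With MicroOpBufferSize = 1 the node lands on Available immediately, which
is exactly what lets the register-pressure heuristics described below
come into play.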

Note that I was initially concerned there might be a correctness aspect
(as in some kind of exposed pipeline design), but the generic scheduler
doesn't seem to know how to insert noop instructions. Without that, a
program already wasn't guaranteed to schedule correctly on an exposed
pipeline, depending on the program and schedule model in question.

The effect of this is that we sometimes prefer lower register pressure
over latency in codegen results. This is mostly churn (or small wins) on
scalar code because we have many more registers, but it is of major
importance on vector - particularly at high LMUL - because we
effectively have many fewer registers and the relative cost of spilling
is much higher. This is a significant improvement in high-LMUL code
quality for default rva23u configurations - or, for that matter, any
vector configuration that doesn't specify -mcpu.

Fixes #107532
2025-02-12 12:31:39 -08:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -o - %s | FileCheck %s
; This test has been minimized from GCC Torture Suite's regstack-1.c
; and checks that RISCVInstrInfo::storeRegToStackSlot works at the basic
; level.
@U = external local_unnamed_addr global fp128, align 16
@Y1 = external local_unnamed_addr global fp128, align 16
@X = external local_unnamed_addr global fp128, align 16
@Y = external local_unnamed_addr global fp128, align 16
@T = external local_unnamed_addr global fp128, align 16
@S = external local_unnamed_addr global fp128, align 16
define void @main() local_unnamed_addr nounwind {
; CHECK-LABEL: main:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -704
; CHECK-NEXT: sw ra, 700(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s0, 696(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s1, 692(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s2, 688(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s3, 684(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s4, 680(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s5, 676(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s6, 672(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s7, 668(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s8, 664(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s9, 660(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s10, 656(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s11, 652(sp) # 4-byte Folded Spill
; CHECK-NEXT: lui a0, %hi(U)
; CHECK-NEXT: lw s6, %lo(U)(a0)
; CHECK-NEXT: lw s7, %lo(U+4)(a0)
; CHECK-NEXT: lw s8, %lo(U+8)(a0)
; CHECK-NEXT: lw s0, %lo(U+12)(a0)
; CHECK-NEXT: sw zero, 616(sp)
; CHECK-NEXT: sw zero, 620(sp)
; CHECK-NEXT: sw zero, 624(sp)
; CHECK-NEXT: sw zero, 628(sp)
; CHECK-NEXT: sw s6, 600(sp)
; CHECK-NEXT: sw s7, 604(sp)
; CHECK-NEXT: sw s8, 608(sp)
; CHECK-NEXT: sw s0, 612(sp)
; CHECK-NEXT: addi a0, sp, 632
; CHECK-NEXT: addi a1, sp, 616
; CHECK-NEXT: addi a2, sp, 600
; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw s1, 632(sp)
; CHECK-NEXT: lw s2, 636(sp)
; CHECK-NEXT: lw s3, 640(sp)
; CHECK-NEXT: lw s4, 644(sp)
; CHECK-NEXT: sw s6, 552(sp)
; CHECK-NEXT: sw s7, 556(sp)
; CHECK-NEXT: sw s8, 560(sp)
; CHECK-NEXT: sw s0, 564(sp)
; CHECK-NEXT: sw s1, 568(sp)
; CHECK-NEXT: sw s2, 572(sp)
; CHECK-NEXT: sw s3, 576(sp)
; CHECK-NEXT: sw s4, 580(sp)
; CHECK-NEXT: addi a0, sp, 584
; CHECK-NEXT: addi a1, sp, 568
; CHECK-NEXT: addi a2, sp, 552
; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw a0, 584(sp)
; CHECK-NEXT: sw a0, 52(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 588(sp)
; CHECK-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 592(sp)
; CHECK-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 596(sp)
; CHECK-NEXT: sw a0, 40(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw zero, 504(sp)
; CHECK-NEXT: sw zero, 508(sp)
; CHECK-NEXT: sw zero, 512(sp)
; CHECK-NEXT: sw zero, 516(sp)
; CHECK-NEXT: sw s6, 520(sp)
; CHECK-NEXT: sw s7, 524(sp)
; CHECK-NEXT: sw s8, 528(sp)
; CHECK-NEXT: sw s0, 532(sp)
; CHECK-NEXT: addi a0, sp, 536
; CHECK-NEXT: addi a1, sp, 520
; CHECK-NEXT: addi a2, sp, 504
; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw s5, 536(sp)
; CHECK-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw s9, 540(sp)
; CHECK-NEXT: sw s9, 32(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw s10, 544(sp)
; CHECK-NEXT: sw s10, 28(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw s11, 548(sp)
; CHECK-NEXT: sw s11, 24(sp) # 4-byte Folded Spill
; CHECK-NEXT: lui a0, %hi(Y1)
; CHECK-NEXT: lw a1, %lo(Y1)(a0)
; CHECK-NEXT: sw a1, 20(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a2, %lo(Y1+4)(a0)
; CHECK-NEXT: sw a2, 16(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a3, %lo(Y1+8)(a0)
; CHECK-NEXT: sw a3, 12(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, %lo(Y1+12)(a0)
; CHECK-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw a1, 312(sp)
; CHECK-NEXT: sw a2, 316(sp)
; CHECK-NEXT: sw a3, 320(sp)
; CHECK-NEXT: sw a0, 324(sp)
; CHECK-NEXT: sw s1, 328(sp)
; CHECK-NEXT: sw s2, 332(sp)
; CHECK-NEXT: sw s3, 336(sp)
; CHECK-NEXT: sw s4, 340(sp)
; CHECK-NEXT: addi a0, sp, 344
; CHECK-NEXT: addi a1, sp, 328
; CHECK-NEXT: addi a2, sp, 312
; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw a0, 344(sp)
; CHECK-NEXT: sw a0, 68(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 348(sp)
; CHECK-NEXT: sw a0, 64(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 352(sp)
; CHECK-NEXT: sw a0, 60(sp) # 4-byte Folded Spill
; CHECK-NEXT: lw a0, 356(sp)
; CHECK-NEXT: sw a0, 56(sp) # 4-byte Folded Spill
; CHECK-NEXT: sw s6, 472(sp)
; CHECK-NEXT: sw s7, 476(sp)
; CHECK-NEXT: sw s8, 480(sp)
; CHECK-NEXT: sw s0, 484(sp)
; CHECK-NEXT: sw s5, 456(sp)
; CHECK-NEXT: sw s9, 460(sp)
; CHECK-NEXT: sw s10, 464(sp)
; CHECK-NEXT: sw s11, 468(sp)
; CHECK-NEXT: addi a0, sp, 488
; CHECK-NEXT: addi a1, sp, 472
; CHECK-NEXT: addi a2, sp, 456
; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw a0, 488(sp)
; CHECK-NEXT: lw a1, 492(sp)
; CHECK-NEXT: lw a2, 496(sp)
; CHECK-NEXT: lw a3, 500(sp)
; CHECK-NEXT: sw zero, 424(sp)
; CHECK-NEXT: sw zero, 428(sp)
; CHECK-NEXT: sw zero, 432(sp)
; CHECK-NEXT: sw zero, 436(sp)
; CHECK-NEXT: sw a0, 408(sp)
; CHECK-NEXT: sw a1, 412(sp)
; CHECK-NEXT: sw a2, 416(sp)
; CHECK-NEXT: sw a3, 420(sp)
; CHECK-NEXT: addi a0, sp, 440
; CHECK-NEXT: addi a1, sp, 424
; CHECK-NEXT: addi a2, sp, 408
; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw a0, 440(sp)
; CHECK-NEXT: lw a1, 444(sp)
; CHECK-NEXT: lw a2, 448(sp)
; CHECK-NEXT: lw a3, 452(sp)
; CHECK-NEXT: lui a4, %hi(X)
; CHECK-NEXT: sw a3, %lo(X+12)(a4)
; CHECK-NEXT: sw a2, %lo(X+8)(a4)
; CHECK-NEXT: sw a1, %lo(X+4)(a4)
; CHECK-NEXT: sw a0, %lo(X)(a4)
; CHECK-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s1, 216(sp)
; CHECK-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s2, 220(sp)
; CHECK-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s3, 224(sp)
; CHECK-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s4, 228(sp)
; CHECK-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s5, 232(sp)
; CHECK-NEXT: lw s9, 48(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s9, 236(sp)
; CHECK-NEXT: lw s10, 44(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s10, 240(sp)
; CHECK-NEXT: lw s11, 40(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw s11, 244(sp)
; CHECK-NEXT: addi a0, sp, 248
; CHECK-NEXT: addi a1, sp, 232
; CHECK-NEXT: addi a2, sp, 216
; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw s0, 248(sp)
; CHECK-NEXT: lw s6, 252(sp)
; CHECK-NEXT: lw s7, 256(sp)
; CHECK-NEXT: lw s8, 260(sp)
; CHECK-NEXT: sw zero, 360(sp)
; CHECK-NEXT: sw zero, 364(sp)
; CHECK-NEXT: sw zero, 368(sp)
; CHECK-NEXT: sw zero, 372(sp)
; CHECK-NEXT: lw a0, 36(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 376(sp)
; CHECK-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 380(sp)
; CHECK-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 384(sp)
; CHECK-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 388(sp)
; CHECK-NEXT: addi a0, sp, 392
; CHECK-NEXT: addi a1, sp, 376
; CHECK-NEXT: addi a2, sp, 360
; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw a0, 392(sp)
; CHECK-NEXT: lw a1, 396(sp)
; CHECK-NEXT: lw a2, 400(sp)
; CHECK-NEXT: lw a3, 404(sp)
; CHECK-NEXT: lui a4, %hi(S)
; CHECK-NEXT: sw a3, %lo(S+12)(a4)
; CHECK-NEXT: sw a2, %lo(S+8)(a4)
; CHECK-NEXT: sw a1, %lo(S+4)(a4)
; CHECK-NEXT: sw a0, %lo(S)(a4)
; CHECK-NEXT: sw s5, 264(sp)
; CHECK-NEXT: sw s9, 268(sp)
; CHECK-NEXT: sw s10, 272(sp)
; CHECK-NEXT: sw s11, 276(sp)
; CHECK-NEXT: lw a0, 68(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 280(sp)
; CHECK-NEXT: lw a0, 64(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 284(sp)
; CHECK-NEXT: lw a0, 60(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 288(sp)
; CHECK-NEXT: lw a0, 56(sp) # 4-byte Folded Reload
; CHECK-NEXT: sw a0, 292(sp)
; CHECK-NEXT: addi a0, sp, 296
; CHECK-NEXT: addi a1, sp, 280
; CHECK-NEXT: addi a2, sp, 264
; CHECK-NEXT: call __subtf3
; CHECK-NEXT: lw a0, 296(sp)
; CHECK-NEXT: lw a1, 300(sp)
; CHECK-NEXT: lw a2, 304(sp)
; CHECK-NEXT: lw a3, 308(sp)
; CHECK-NEXT: lui a4, %hi(T)
; CHECK-NEXT: sw a3, %lo(T+12)(a4)
; CHECK-NEXT: sw a2, %lo(T+8)(a4)
; CHECK-NEXT: sw a1, %lo(T+4)(a4)
; CHECK-NEXT: sw a0, %lo(T)(a4)
; CHECK-NEXT: sw zero, 168(sp)
; CHECK-NEXT: sw zero, 172(sp)
; CHECK-NEXT: sw zero, 176(sp)
; CHECK-NEXT: sw zero, 180(sp)
; CHECK-NEXT: sw s0, 184(sp)
; CHECK-NEXT: sw s6, 188(sp)
; CHECK-NEXT: sw s7, 192(sp)
; CHECK-NEXT: sw s8, 196(sp)
; CHECK-NEXT: addi a0, sp, 200
; CHECK-NEXT: addi a1, sp, 184
; CHECK-NEXT: addi a2, sp, 168
; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw a0, 200(sp)
; CHECK-NEXT: lw a1, 204(sp)
; CHECK-NEXT: lw a2, 208(sp)
; CHECK-NEXT: lw a3, 212(sp)
; CHECK-NEXT: lui a4, %hi(Y)
; CHECK-NEXT: sw a3, %lo(Y+12)(a4)
; CHECK-NEXT: sw a2, %lo(Y+8)(a4)
; CHECK-NEXT: sw a1, %lo(Y+4)(a4)
; CHECK-NEXT: sw a0, %lo(Y)(a4)
; CHECK-NEXT: sw zero, 120(sp)
; CHECK-NEXT: sw zero, 124(sp)
; CHECK-NEXT: sw zero, 128(sp)
; CHECK-NEXT: sw zero, 132(sp)
; CHECK-NEXT: sw s1, 136(sp)
; CHECK-NEXT: sw s2, 140(sp)
; CHECK-NEXT: sw s3, 144(sp)
; CHECK-NEXT: sw s4, 148(sp)
; CHECK-NEXT: addi a0, sp, 152
; CHECK-NEXT: addi a1, sp, 136
; CHECK-NEXT: addi a2, sp, 120
; CHECK-NEXT: call __multf3
; CHECK-NEXT: lw a2, 152(sp)
; CHECK-NEXT: lw a3, 156(sp)
; CHECK-NEXT: lw a4, 160(sp)
; CHECK-NEXT: lw a5, 164(sp)
; CHECK-NEXT: lui a1, 786400
; CHECK-NEXT: addi a0, sp, 104
; CHECK-NEXT: sw zero, 72(sp)
; CHECK-NEXT: sw zero, 76(sp)
; CHECK-NEXT: sw zero, 80(sp)
; CHECK-NEXT: sw a1, 84(sp)
; CHECK-NEXT: addi a1, sp, 88
; CHECK-NEXT: sw a2, 88(sp)
; CHECK-NEXT: sw a3, 92(sp)
; CHECK-NEXT: sw a4, 96(sp)
; CHECK-NEXT: sw a5, 100(sp)
; CHECK-NEXT: addi a2, sp, 72
; CHECK-NEXT: call __addtf3
; CHECK-NEXT: lw a0, 104(sp)
; CHECK-NEXT: lw a1, 108(sp)
; CHECK-NEXT: lw a2, 112(sp)
; CHECK-NEXT: lw a3, 116(sp)
; CHECK-NEXT: lui a4, %hi(Y1)
; CHECK-NEXT: sw a2, %lo(Y1+8)(a4)
; CHECK-NEXT: sw a3, %lo(Y1+12)(a4)
; CHECK-NEXT: sw a0, %lo(Y1)(a4)
; CHECK-NEXT: sw a1, %lo(Y1+4)(a4)
; CHECK-NEXT: lw ra, 700(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s0, 696(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s1, 692(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s2, 688(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s3, 684(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s4, 680(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s5, 676(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s6, 672(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s7, 668(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s8, 664(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s9, 660(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s10, 656(sp) # 4-byte Folded Reload
; CHECK-NEXT: lw s11, 652(sp) # 4-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 704
; CHECK-NEXT: ret
%1 = load fp128, ptr @U, align 16
%2 = fsub fp128 0xL00000000000000000000000000000000, %1
%3 = fsub fp128 %2, %1
%4 = fadd fp128 %1, 0xL00000000000000000000000000000000
%5 = load fp128, ptr @Y1, align 16
%6 = fmul fp128 %2, %5
%7 = fadd fp128 %1, %4
%8 = fsub fp128 0xL00000000000000000000000000000000, %7
store fp128 %8, ptr @X, align 16
%9 = fmul fp128 %3, %5
%10 = fmul fp128 0xL00000000000000000000000000000000, %4
store fp128 %10, ptr @S, align 16
%11 = fsub fp128 %6, %3
store fp128 %11, ptr @T, align 16
%12 = fadd fp128 0xL00000000000000000000000000000000, %9
store fp128 %12, ptr @Y, align 16
%13 = fmul fp128 0xL00000000000000000000000000000000, %5
%14 = fadd fp128 %13, 0xL0000000000000000BFFE000000000000
store fp128 %14, ptr @Y1, align 16
ret void
}