This change introduces a default schedule model for the RISCV target which leaves everything unchanged except the MicroOpBufferSize. The default value of this flag under NoSched is 0. Both configurations represent in-order cores (i.e. no reorder window); the difference between them comes down to whether heuristics other than latency are allowed to apply. (Implementation details below.)

I left the processor models which explicitly set MicroOpBufferSize=0 unchanged in this patch, but strongly suspect we should change those too. Honestly, I think the LLVM-wide default for this flag should be changed, but I don't have the energy to manage the updates for all targets.

Implementation-wise, the effect of this change is that schedule units which are ready to run *except that* one of their predecessors may not have completed yet are added to the Available list rather than the Pending one. As a result, it becomes possible to choose to schedule a node before its ready cycle if the heuristics prefer, which essentially means choosing to insert a resource stall instead of e.g. increasing register pressure. Note that I was initially concerned there might be a correctness aspect (as in some kind of exposed pipeline design), but the generic scheduler doesn't seem to know how to insert noop instructions; without that, a correct schedule on an exposed pipeline couldn't be guaranteed anyway, depending on the program and schedule model in question.

In terms of codegen results, the effect is that we sometimes prioritize register pressure over latency. This is mostly churn (or small wins) on scalar code, because we have many more registers there, but it is of major importance for vector code, particularly at high LMUL, because we effectively have many fewer registers and the relative cost of spilling is much higher. This is a significant improvement in high-LMUL code quality for the default rva23u configuration, or any vector configuration without -mcpu for that matter.

Fixes #107532
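For context on the implementation note above, here is a deliberately simplified sketch of the Pending/Available decision. The struct and member names are illustrative stand-ins invented for this description, not the actual LLVM classes; the real logic lives (roughly) in SchedBoundary::releaseNode in llvm/lib/CodeGen/MachineScheduler.cpp.

```cpp
// Simplified sketch, not the verbatim LLVM implementation.
#include <vector>

struct SUnitStub {
  unsigned ReadyCycle; // cycle at which all predecessors will have completed
};

struct SchedBoundaryStub {
  unsigned CurrCycle = 0;
  unsigned MicroOpBufferSize = 0;     // 0 under NoSched; nonzero with the new default model
  std::vector<SUnitStub *> Available; // nodes the heuristics may pick from
  std::vector<SUnitStub *> Pending;   // nodes hidden until their ready cycle

  void releaseNode(SUnitStub *SU) {
    // With MicroOpBufferSize == 0, a node whose ready cycle is still in the
    // future stays in Pending, so only latency can order it. With a nonzero
    // buffer size it becomes Available immediately, which lets e.g. the
    // register-pressure heuristic schedule it early and accept a stall.
    bool IsBuffered = MicroOpBufferSize != 0;
    if (!IsBuffered && SU->ReadyCycle > CurrCycle)
      Pending.push_back(SU);
    else
      Available.push_back(SU);
  }
};
```

With a buffer size of 0, a node waiting on an in-flight predecessor is invisible to the heuristics until its ready cycle arrives; with any nonzero value it competes in Available immediately, which is what allows the register pressure heuristics to win on the vector code this patch cares about.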
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -mattr=+m \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32
; RUN: llc < %s -mtriple=riscv64 -mattr=+m \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV32-BOTH,RV32-FAST
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+unaligned-scalar-mem \
; RUN: | FileCheck %s --check-prefixes=RV64-BOTH,RV64-FAST

; TODO: Due to the initial naive lowering implementation of memset.pattern in
; PreISelIntrinsicLowering, the generated code is not good.

define void @memset_1(ptr %a, i128 %value) nounwind {
; RV32-BOTH-LABEL: memset_1:
; RV32-BOTH: # %bb.0: # %loadstoreloop.preheader
; RV32-BOTH-NEXT: li a2, 0
; RV32-BOTH-NEXT: li a3, 0
; RV32-BOTH-NEXT: lw a4, 0(a1)
; RV32-BOTH-NEXT: lw a5, 4(a1)
; RV32-BOTH-NEXT: lw a6, 8(a1)
; RV32-BOTH-NEXT: lw a1, 12(a1)
; RV32-BOTH-NEXT: .LBB0_1: # %loadstoreloop
; RV32-BOTH-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-BOTH-NEXT: slli a7, a2, 4
; RV32-BOTH-NEXT: addi a2, a2, 1
; RV32-BOTH-NEXT: add a7, a0, a7
; RV32-BOTH-NEXT: sw a4, 0(a7)
; RV32-BOTH-NEXT: sw a5, 4(a7)
; RV32-BOTH-NEXT: sw a6, 8(a7)
; RV32-BOTH-NEXT: sw a1, 12(a7)
; RV32-BOTH-NEXT: seqz a7, a2
; RV32-BOTH-NEXT: add a3, a3, a7
; RV32-BOTH-NEXT: or a7, a2, a3
; RV32-BOTH-NEXT: beqz a7, .LBB0_1
; RV32-BOTH-NEXT: # %bb.2: # %split
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: memset_1:
; RV64-BOTH: # %bb.0: # %loadstoreloop.preheader
; RV64-BOTH-NEXT: addi a3, a0, 16
; RV64-BOTH-NEXT: .LBB0_1: # %loadstoreloop
; RV64-BOTH-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-BOTH-NEXT: sd a1, 0(a0)
; RV64-BOTH-NEXT: sd a2, 8(a0)
; RV64-BOTH-NEXT: addi a0, a0, 16
; RV64-BOTH-NEXT: bne a0, a3, .LBB0_1
; RV64-BOTH-NEXT: # %bb.2: # %split
; RV64-BOTH-NEXT: ret
  tail call void @llvm.experimental.memset.pattern(ptr align 8 %a, i128 %value, i64 1, i1 0)
  ret void
}

define void @memset_1_noalign(ptr %a, i128 %value) nounwind {
; RV32-LABEL: memset_1_noalign:
; RV32: # %bb.0: # %loadstoreloop.preheader
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 24(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s4, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: li a2, 0
; RV32-NEXT: li a3, 0
; RV32-NEXT: lw a4, 0(a1)
; RV32-NEXT: lw a5, 4(a1)
; RV32-NEXT: lw a6, 8(a1)
; RV32-NEXT: lw a1, 12(a1)
; RV32-NEXT: srli a7, a5, 24
; RV32-NEXT: srli t0, a5, 16
; RV32-NEXT: srli t1, a5, 8
; RV32-NEXT: srli t2, a4, 24
; RV32-NEXT: srli t3, a4, 16
; RV32-NEXT: srli t4, a4, 8
; RV32-NEXT: srli t5, a6, 24
; RV32-NEXT: srli t6, a6, 16
; RV32-NEXT: srli s0, a6, 8
; RV32-NEXT: srli s1, a1, 24
; RV32-NEXT: srli s2, a1, 16
; RV32-NEXT: srli s3, a1, 8
; RV32-NEXT: .LBB1_1: # %loadstoreloop
; RV32-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-NEXT: slli s4, a2, 4
; RV32-NEXT: addi a2, a2, 1
; RV32-NEXT: add s4, a0, s4
; RV32-NEXT: sb a5, 4(s4)
; RV32-NEXT: sb t1, 5(s4)
; RV32-NEXT: sb t0, 6(s4)
; RV32-NEXT: sb a7, 7(s4)
; RV32-NEXT: sb a4, 0(s4)
; RV32-NEXT: sb t4, 1(s4)
; RV32-NEXT: sb t3, 2(s4)
; RV32-NEXT: sb t2, 3(s4)
; RV32-NEXT: sb a6, 8(s4)
; RV32-NEXT: sb s0, 9(s4)
; RV32-NEXT: sb t6, 10(s4)
; RV32-NEXT: sb t5, 11(s4)
; RV32-NEXT: sb a1, 12(s4)
; RV32-NEXT: sb s3, 13(s4)
; RV32-NEXT: sb s2, 14(s4)
; RV32-NEXT: sb s1, 15(s4)
; RV32-NEXT: seqz s4, a2
; RV32-NEXT: add a3, a3, s4
; RV32-NEXT: or s4, a2, a3
; RV32-NEXT: beqz s4, .LBB1_1
; RV32-NEXT: # %bb.2: # %split
; RV32-NEXT: lw s0, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 24(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s4, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: ret
;
; RV64-LABEL: memset_1_noalign:
; RV64: # %bb.0: # %loadstoreloop.preheader
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: sd s0, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s2, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: addi a3, a0, 16
; RV64-NEXT: srli a4, a1, 56
; RV64-NEXT: srli a5, a1, 48
; RV64-NEXT: srli a6, a1, 40
; RV64-NEXT: srli a7, a1, 32
; RV64-NEXT: srli t0, a1, 24
; RV64-NEXT: srli t1, a1, 16
; RV64-NEXT: srli t2, a1, 8
; RV64-NEXT: srli t3, a2, 56
; RV64-NEXT: srli t4, a2, 48
; RV64-NEXT: srli t5, a2, 40
; RV64-NEXT: srli t6, a2, 32
; RV64-NEXT: srli s0, a2, 24
; RV64-NEXT: srli s1, a2, 16
; RV64-NEXT: srli s2, a2, 8
; RV64-NEXT: .LBB1_1: # %loadstoreloop
; RV64-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-NEXT: sb a7, 4(a0)
; RV64-NEXT: sb a6, 5(a0)
; RV64-NEXT: sb a5, 6(a0)
; RV64-NEXT: sb a4, 7(a0)
; RV64-NEXT: sb a1, 0(a0)
; RV64-NEXT: sb t2, 1(a0)
; RV64-NEXT: sb t1, 2(a0)
; RV64-NEXT: sb t0, 3(a0)
; RV64-NEXT: sb t6, 12(a0)
; RV64-NEXT: sb t5, 13(a0)
; RV64-NEXT: sb t4, 14(a0)
; RV64-NEXT: sb t3, 15(a0)
; RV64-NEXT: sb a2, 8(a0)
; RV64-NEXT: sb s2, 9(a0)
; RV64-NEXT: sb s1, 10(a0)
; RV64-NEXT: sb s0, 11(a0)
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: bne a0, a3, .LBB1_1
; RV64-NEXT: # %bb.2: # %split
; RV64-NEXT: ld s0, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s1, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s2, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: ret
;
; RV32-FAST-LABEL: memset_1_noalign:
; RV32-FAST: # %bb.0: # %loadstoreloop.preheader
; RV32-FAST-NEXT: li a2, 0
; RV32-FAST-NEXT: li a3, 0
; RV32-FAST-NEXT: lw a4, 0(a1)
; RV32-FAST-NEXT: lw a5, 4(a1)
; RV32-FAST-NEXT: lw a6, 8(a1)
; RV32-FAST-NEXT: lw a1, 12(a1)
; RV32-FAST-NEXT: .LBB1_1: # %loadstoreloop
; RV32-FAST-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-FAST-NEXT: slli a7, a2, 4
; RV32-FAST-NEXT: addi a2, a2, 1
; RV32-FAST-NEXT: add a7, a0, a7
; RV32-FAST-NEXT: sw a4, 0(a7)
; RV32-FAST-NEXT: sw a5, 4(a7)
; RV32-FAST-NEXT: sw a6, 8(a7)
; RV32-FAST-NEXT: sw a1, 12(a7)
; RV32-FAST-NEXT: seqz a7, a2
; RV32-FAST-NEXT: add a3, a3, a7
; RV32-FAST-NEXT: or a7, a2, a3
; RV32-FAST-NEXT: beqz a7, .LBB1_1
; RV32-FAST-NEXT: # %bb.2: # %split
; RV32-FAST-NEXT: ret
;
; RV64-FAST-LABEL: memset_1_noalign:
; RV64-FAST: # %bb.0: # %loadstoreloop.preheader
; RV64-FAST-NEXT: addi a3, a0, 16
; RV64-FAST-NEXT: .LBB1_1: # %loadstoreloop
; RV64-FAST-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-FAST-NEXT: sd a1, 0(a0)
; RV64-FAST-NEXT: sd a2, 8(a0)
; RV64-FAST-NEXT: addi a0, a0, 16
; RV64-FAST-NEXT: bne a0, a3, .LBB1_1
; RV64-FAST-NEXT: # %bb.2: # %split
; RV64-FAST-NEXT: ret
  tail call void @llvm.experimental.memset.pattern(ptr %a, i128 %value, i64 1, i1 0)
  ret void
}

define void @memset_4(ptr %a, i128 %value) nounwind {
; RV32-BOTH-LABEL: memset_4:
; RV32-BOTH: # %bb.0: # %loadstoreloop.preheader
; RV32-BOTH-NEXT: li a2, 0
; RV32-BOTH-NEXT: li a3, 0
; RV32-BOTH-NEXT: lw a4, 0(a1)
; RV32-BOTH-NEXT: lw a5, 4(a1)
; RV32-BOTH-NEXT: lw a6, 8(a1)
; RV32-BOTH-NEXT: lw a1, 12(a1)
; RV32-BOTH-NEXT: .LBB2_1: # %loadstoreloop
; RV32-BOTH-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-BOTH-NEXT: slli a7, a2, 4
; RV32-BOTH-NEXT: addi a2, a2, 1
; RV32-BOTH-NEXT: add a7, a0, a7
; RV32-BOTH-NEXT: seqz t0, a2
; RV32-BOTH-NEXT: sw a4, 0(a7)
; RV32-BOTH-NEXT: sw a5, 4(a7)
; RV32-BOTH-NEXT: sw a6, 8(a7)
; RV32-BOTH-NEXT: sw a1, 12(a7)
; RV32-BOTH-NEXT: add a3, a3, t0
; RV32-BOTH-NEXT: seqz a7, a3
; RV32-BOTH-NEXT: sltiu t0, a2, 4
; RV32-BOTH-NEXT: and a7, a7, t0
; RV32-BOTH-NEXT: bnez a7, .LBB2_1
; RV32-BOTH-NEXT: # %bb.2: # %split
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: memset_4:
; RV64-BOTH: # %bb.0: # %loadstoreloop.preheader
; RV64-BOTH-NEXT: addi a3, a0, 64
; RV64-BOTH-NEXT: .LBB2_1: # %loadstoreloop
; RV64-BOTH-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-BOTH-NEXT: sd a1, 0(a0)
; RV64-BOTH-NEXT: sd a2, 8(a0)
; RV64-BOTH-NEXT: addi a0, a0, 16
; RV64-BOTH-NEXT: bne a0, a3, .LBB2_1
; RV64-BOTH-NEXT: # %bb.2: # %split
; RV64-BOTH-NEXT: ret
  tail call void @llvm.experimental.memset.pattern(ptr align 8 %a, i128 %value, i64 4, i1 0)
  ret void
}

define void @memset_x(ptr %a, i128 %value, i64 %x) nounwind {
; RV32-BOTH-LABEL: memset_x:
; RV32-BOTH: # %bb.0:
; RV32-BOTH-NEXT: or a4, a2, a3
; RV32-BOTH-NEXT: beqz a4, .LBB3_5
; RV32-BOTH-NEXT: # %bb.1: # %loadstoreloop.preheader
; RV32-BOTH-NEXT: li a4, 0
; RV32-BOTH-NEXT: li a5, 0
; RV32-BOTH-NEXT: lw a6, 0(a1)
; RV32-BOTH-NEXT: lw a7, 4(a1)
; RV32-BOTH-NEXT: lw t0, 8(a1)
; RV32-BOTH-NEXT: lw a1, 12(a1)
; RV32-BOTH-NEXT: j .LBB3_3
; RV32-BOTH-NEXT: .LBB3_2: # %loadstoreloop
; RV32-BOTH-NEXT: # in Loop: Header=BB3_3 Depth=1
; RV32-BOTH-NEXT: sltu t1, a5, a3
; RV32-BOTH-NEXT: beqz t1, .LBB3_5
; RV32-BOTH-NEXT: .LBB3_3: # %loadstoreloop
; RV32-BOTH-NEXT: # =>This Inner Loop Header: Depth=1
; RV32-BOTH-NEXT: slli t1, a4, 4
; RV32-BOTH-NEXT: addi a4, a4, 1
; RV32-BOTH-NEXT: add t1, a0, t1
; RV32-BOTH-NEXT: sw a6, 0(t1)
; RV32-BOTH-NEXT: sw a7, 4(t1)
; RV32-BOTH-NEXT: sw t0, 8(t1)
; RV32-BOTH-NEXT: sw a1, 12(t1)
; RV32-BOTH-NEXT: seqz t1, a4
; RV32-BOTH-NEXT: add a5, a5, t1
; RV32-BOTH-NEXT: bne a5, a3, .LBB3_2
; RV32-BOTH-NEXT: # %bb.4: # in Loop: Header=BB3_3 Depth=1
; RV32-BOTH-NEXT: sltu t1, a4, a2
; RV32-BOTH-NEXT: bnez t1, .LBB3_3
; RV32-BOTH-NEXT: .LBB3_5: # %split
; RV32-BOTH-NEXT: ret
;
; RV64-BOTH-LABEL: memset_x:
; RV64-BOTH: # %bb.0:
; RV64-BOTH-NEXT: beqz a3, .LBB3_3
; RV64-BOTH-NEXT: # %bb.1: # %loadstoreloop.preheader
; RV64-BOTH-NEXT: li a4, 0
; RV64-BOTH-NEXT: .LBB3_2: # %loadstoreloop
; RV64-BOTH-NEXT: # =>This Inner Loop Header: Depth=1
; RV64-BOTH-NEXT: sd a1, 0(a0)
; RV64-BOTH-NEXT: sd a2, 8(a0)
; RV64-BOTH-NEXT: addi a4, a4, 1
; RV64-BOTH-NEXT: addi a0, a0, 16
; RV64-BOTH-NEXT: bltu a4, a3, .LBB3_2
; RV64-BOTH-NEXT: .LBB3_3: # %split
; RV64-BOTH-NEXT: ret
  tail call void @llvm.experimental.memset.pattern(ptr align 8 %a, i128 %value, i64 %x, i1 0)
  ret void
}