clang-p2996/llvm/test/CodeGen/X86/avoid-sfb-kill-flags.mir
Commit fae05692a3 by Matt Arsenault: CodeGen: Print/parse LLTs in MachineMemOperands
This will currently accept the old number-of-bytes syntax and convert
it to a scalar type. That fallback should be removed in the near future
(I think I already converted all of the tests, but likely missed a few).
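
For example, a minimal sketch of a single memory operand (the register numbers and %ir.p are made up for illustration); the parser still accepts the first line and treats it as the second:

  old: %2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load 4 from %ir.p)
  new: %2:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from %ir.p)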

I'm not sure what the exact syntax and policy should be. We could continue
printing the number of bytes for non-generic instructions to avoid
test churn, and only allow non-scalar types for generic instructions.
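
As a purely hypothetical illustration of that policy (not something this commit implements), a target instruction would keep the byte count while a generic instruction would carry the LLT; the operands here are made up:

  MOV32mr %1, 1, $noreg, 0, $noreg, %2 :: (store 4 into %ir.p)
  G_STORE %2(s32), %0(p0) :: (store (s32) into %ir.p)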

This will currently print the LLT in parentheses, but accept parsing
the existing integers and implicitly converting them to scalar types.
The parentheses are a bit ugly, but the parser logic doesn't seem able
to cope without either parentheses or some keyword to indicate the
start of a type.
2021-06-30 16:54:13 -04:00
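
For context, the parenthesized type is also what lets the parser handle non-scalar types; a vector access would look roughly like this (the register numbers and %ir.v are illustrative):

  %1:_(<4 x s32>) = G_LOAD %0(p0) :: (load (<4 x s32>) from %ir.v)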

# RUN: llc -o - %s -mtriple=x86_64-- -run-pass=x86-avoid-SFB | FileCheck %s
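# The x86-avoid-SFB pass is expected to split the 16-byte MOVUPS copy of
# %struct.S into the smaller scalar loads and stores checked below, with the
# kill flags of %0 and %1 ending up on the last load/store of the split
# sequence rather than on the earlier ones.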
--- |
  ; ModuleID = '../test/CodeGen/X86/avoid-sfb-mir.ll'
  source_filename = "../test/CodeGen/X86/avoid-sfb-mir.ll"
  target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
  target triple = "x86_64-unknown-linux-gnu"

  %struct.S = type { i32, i32, i32, i32 }

  ; Function Attrs: nounwind uwtable
  define void @test_imm_store(%struct.S* noalias nocapture %s1, %struct.S* nocapture %s2, i32 %x, %struct.S* nocapture %s3) local_unnamed_addr #0 {
  entry:
    %a2 = bitcast %struct.S* %s1 to i32*
    store i32 0, i32* %a2, align 4
    %a13 = bitcast %struct.S* %s3 to i32*
    store i32 1, i32* %a13, align 4
    %0 = bitcast %struct.S* %s2 to i8*
    %1 = bitcast %struct.S* %s1 to i8*
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 16, i1 false)
    ret void
  }

  declare void @bar(%struct.S*) local_unnamed_addr

  ; Function Attrs: argmemonly nounwind
  declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1) #1
...
---
name: test_imm_store
alignment: 16
tracksRegLiveness: true
registers:
  - { id: 0, class: gr64 }
  - { id: 1, class: gr64 }
  - { id: 2, class: gr32 }
  - { id: 3, class: gr64 }
  - { id: 4, class: vr128 }
liveins:
  - { reg: '$rdi', virtual-reg: '%0' }
  - { reg: '$rsi', virtual-reg: '%1' }
  - { reg: '$rcx', virtual-reg: '%3' }
body: |
  bb.0.entry:
    liveins: $rdi, $rsi, $rcx

    ; CHECK: MOV32mi %0, 1, $noreg, 0, $noreg, 0 :: (store (s32) into %ir.a2)
    ; CHECK-NEXT: MOV32mi %3, 1, $noreg, 0, $noreg, 1 :: (store (s32) into %ir.a13)
    ; CHECK-NEXT: %5:gr32 = MOV32rm %0, 1, $noreg, 0, $noreg :: (load (s32) from %ir.1)
    ; CHECK-NEXT: MOV32mr %1, 1, $noreg, 0, $noreg, killed %5 :: (store (s32) into %ir.0)
    ; CHECK-NEXT: %6:gr64 = MOV64rm %0, 1, $noreg, 4, $noreg :: (load (s64) from %ir.1 + 4, align 4)
    ; CHECK-NEXT: MOV64mr %1, 1, $noreg, 4, $noreg, killed %6 :: (store (s64) into %ir.0 + 4, align 4)
    ; CHECK-NEXT: %7:gr32 = MOV32rm killed %0, 1, $noreg, 12, $noreg :: (load (s32) from %ir.1 + 12)
    ; CHECK-NEXT: MOV32mr killed %1, 1, $noreg, 12, $noreg, killed %7 :: (store (s32) into %ir.0 + 12)
    %3:gr64 = COPY $rcx
    %1:gr64 = COPY $rsi
    %0:gr64 = COPY $rdi
    MOV32mi %0, 1, $noreg, 0, $noreg, 0 :: (store (s32) into %ir.a2)
    MOV32mi %3, 1, $noreg, 0, $noreg, 1 :: (store (s32) into %ir.a13)
    %4:vr128 = MOVUPSrm killed %0, 1, $noreg, 0, $noreg :: (load (s128) from %ir.1, align 4)
    MOVUPSmr killed %1, 1, $noreg, 0, $noreg, killed %4 :: (store (s128) into %ir.0, align 4)
    RET 0
...