As of ea222be0d, LLVM's assembler will actually try to honour the "fill value" part of p2align directives. X86 printed these as 0x90, which isn't actually what it wanted: for .text padding, we want multi-byte nops. Since ea222be0d, compiling via a textual assembly file therefore produces single-byte nop padding, while the built-in assembler produces multi-byte nops. This divergent behaviour is undesirable. To fix this: don't set the byte-padding field for x86, which allows the assembler to pick multi-byte nops. Test that we get the same multi-byte padding whether we compile via textual assembly or directly to an object file. Added same-align-bytes-with-llasm-llobj.ll to that effect, and updated numerous other tests to not contain check lines for the explicit padding.
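For illustration, a reduced sketch of the comparison the new test is after; the RUN lines below are hypothetical, not copied from same-align-bytes-with-llasm-llobj.ll, and they assume the multi-byte padding disassembles to a nopw form:

; Hypothetical reduced check, for illustration only.
; Path 1: IR -> object file via the built-in (integrated) assembler.
; RUN: llc < %s -mtriple=x86_64 -filetype=obj -o %t.o
; Path 2: IR -> textual assembly -> object file via llvm-mc.
; RUN: llc < %s -mtriple=x86_64 -o %t.s
; RUN: llvm-mc -triple=x86_64 -filetype=obj %t.s -o %t2.o
; Both paths should now produce the same multi-byte nop padding,
; rather than single-byte 0x90 nops on the textual path.
; RUN: llvm-objdump -d %t.o | FileCheck %s
; RUN: llvm-objdump -d %t2.o | FileCheck %s
; CHECK: nopw

If either disassembly stopped showing the multi-byte nop, the textual-assembly and built-in-assembler paths would have diverged again.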
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX512

define i32 @PR63108() {
; SSE-LABEL: PR63108:
; SSE: # %bb.0: # %entry
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: testb %al, %al
; SSE-NEXT: je .LBB0_2
; SSE-NEXT: # %bb.1:
; SSE-NEXT: movd {{.*#+}} xmm0 = [57339,0,0,0]
; SSE-NEXT: jmp .LBB0_5
; SSE-NEXT: .LBB0_2: # %vector.body.preheader
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: movd {{.*#+}} xmm1 = [57339,0,0,0]
; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: .p2align 4
; SSE-NEXT: .LBB0_3: # %vector.body
; SSE-NEXT: # =>This Inner Loop Header: Depth=1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: testb %al, %al
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: jne .LBB0_3
; SSE-NEXT: # %bb.4: # %middle.block
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSE-NEXT: pxor %xmm0, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE-NEXT: pxor %xmm1, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pxor %xmm2, %xmm0
; SSE-NEXT: .LBB0_5: # %for.cond.cleanup
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: movsbl %al, %ecx
; SSE-NEXT: shrl $8, %eax
; SSE-NEXT: movsbl %al, %eax
; SSE-NEXT: addl %ecx, %eax
; SSE-NEXT: retq
;
; AVX1-LABEL: PR63108:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: xorl %eax, %eax
; AVX1-NEXT: testb %al, %al
; AVX1-NEXT: je .LBB0_2
; AVX1-NEXT: # %bb.1:
; AVX1-NEXT: vmovd {{.*#+}} xmm0 = [57339,0,0,0]
; AVX1-NEXT: jmp .LBB0_5
; AVX1-NEXT: .LBB0_2: # %vector.body.preheader
; AVX1-NEXT: vmovss {{.*#+}} xmm0 = [57339,0,0,0]
; AVX1-NEXT: xorl %eax, %eax
; AVX1-NEXT: .p2align 4
; AVX1-NEXT: .LBB0_3: # %vector.body
; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
; AVX1-NEXT: vmovaps %ymm0, %ymm1
; AVX1-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX1-NEXT: testb %al, %al
; AVX1-NEXT: jne .LBB0_3
; AVX1-NEXT: # %bb.4: # %middle.block
; AVX1-NEXT: vxorps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX1-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: .LBB0_5: # %for.cond.cleanup
; AVX1-NEXT: vmovd %xmm0, %eax
; AVX1-NEXT: vpextrb $1, %xmm0, %ecx
; AVX1-NEXT: movsbl %al, %edx
; AVX1-NEXT: movsbl %cl, %eax
; AVX1-NEXT: addl %edx, %eax
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR63108:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: testb %al, %al
; AVX2-NEXT: je .LBB0_2
; AVX2-NEXT: # %bb.1:
; AVX2-NEXT: vpbroadcastw {{.*#+}} xmm0 = [251,223,251,223,251,223,251,223,251,223,251,223,251,223,251,223]
; AVX2-NEXT: jmp .LBB0_5
; AVX2-NEXT: .LBB0_2: # %vector.body.preheader
; AVX2-NEXT: vmovd {{.*#+}} xmm0 = [57339,0,0,0]
; AVX2-NEXT: xorl %eax, %eax
; AVX2-NEXT: .p2align 4
; AVX2-NEXT: .LBB0_3: # %vector.body
; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
; AVX2-NEXT: vmovdqa %ymm0, %ymm1
; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT: testb %al, %al
; AVX2-NEXT: jne .LBB0_3
; AVX2-NEXT: # %bb.4: # %middle.block
; AVX2-NEXT: vpxor {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: .LBB0_5: # %for.cond.cleanup
; AVX2-NEXT: vmovd %xmm0, %eax
; AVX2-NEXT: vpextrb $1, %xmm0, %ecx
; AVX2-NEXT: movsbl %al, %edx
; AVX2-NEXT: movsbl %cl, %eax
; AVX2-NEXT: addl %edx, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: PR63108:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: xorl %eax, %eax
; AVX512-NEXT: testb %al, %al
; AVX512-NEXT: je .LBB0_2
; AVX512-NEXT: # %bb.1:
; AVX512-NEXT: vpbroadcastw {{.*#+}} xmm0 = [251,223,251,223,251,223,251,223,251,223,251,223,251,223,251,223]
; AVX512-NEXT: jmp .LBB0_5
; AVX512-NEXT: .LBB0_2: # %vector.body.preheader
; AVX512-NEXT: vmovd {{.*#+}} xmm0 = [57339,0,0,0]
; AVX512-NEXT: xorl %eax, %eax
; AVX512-NEXT: .p2align 4
; AVX512-NEXT: .LBB0_3: # %vector.body
; AVX512-NEXT: # =>This Inner Loop Header: Depth=1
; AVX512-NEXT: vmovdqa %ymm0, %ymm1
; AVX512-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT: testb %al, %al
; AVX512-NEXT: jne .LBB0_3
; AVX512-NEXT: # %bb.4: # %middle.block
; AVX512-NEXT: vpxord {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to8}, %ymm1, %ymm0
; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX512-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX512-NEXT: .LBB0_5: # %for.cond.cleanup
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: vpextrb $1, %xmm0, %ecx
; AVX512-NEXT: movsbl %al, %edx
; AVX512-NEXT: movsbl %cl, %eax
; AVX512-NEXT: addl %edx, %eax
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  br i1 poison, label %for.cond.cleanup, label %vector.body

vector.body: ; preds = %vector.body, %entry
  %vec.phi = phi <16 x i16> [ zeroinitializer, %vector.body ], [ <i16 -8197, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>, %entry ]
  %0 = icmp eq i32 poison, poison
  br i1 %0, label %middle.block, label %vector.body

middle.block: ; preds = %vector.body
  %bin.rdx22 = xor <16 x i16> %vec.phi, <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
  %1 = tail call i16 @llvm.vector.reduce.xor.v16i16(<16 x i16> %bin.rdx22)
  %2 = bitcast i16 %1 to <2 x i8>
  br label %for.cond.cleanup

for.cond.cleanup: ; preds = %middle.block, %entry
  %id15142.0.lcssa = phi <2 x i8> [ <i8 -5, i8 -33>, %entry ], [ %2, %middle.block ]
  %vecext = extractelement <2 x i8> %id15142.0.lcssa, i64 0
  %vecext8 = extractelement <2 x i8> %id15142.0.lcssa, i64 1
  %conv7 = sext i8 %vecext to i32
  %conv9 = sext i8 %vecext8 to i32
  %res = add i32 %conv7, %conv9
  ret i32 %res
}

declare i16 @llvm.vector.reduce.xor.v16i16(<16 x i16>)