clang-p2996/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/unaligned-buffer.ll
Piotr Sobczak 170c0dac44 [AMDGPU] Fix edge case of buffer OOB handling (#115479)
Strengthen the out-of-bounds guarantees for buffer accesses by disallowing
accesses with alignment lower than the natural alignment of the accessed type.

This is needed to specifically address the edge case where an access
starts out-of-bounds and then enters in-bounds, as the hardware would
treat the entire access as being out-of-bounds. Most users are unaffected,
but at least one graphics device extension (VK_EXT_robustness2) has very
strict requirements: in-bounds accesses must return the correct value, and
out-of-bounds accesses must return zero.
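
As a concrete example, using the offsets from the test below: merging four
i32 loads at buffer offsets -8, -4, 0, and 4 produces a single 16-byte load
at offset -8, covering bytes [-8, 8). Since that access starts out-of-bounds,
the hardware treats all 16 bytes as out-of-bounds and returns zero even for
the in-bounds bytes at [0, 8), violating the VK_EXT_robustness2 requirement.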

The direct consequence of the patch is that a buffer access at a negative
address is no longer merged by the load-store vectorizer with one at a
positive address, which fixes a CTS test.

Targets that do not care about the new behavior are advised to use the new
target feature relaxed-buffer-oob-mode, which preserves the behavior from
before this patch.
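
For example, the relaxed mode can be enabled when running opt, mirroring the
RUN lines of the test below (with in.ll standing in for the input module):

  opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer \
      -mattr=+relaxed-buffer-oob-mode -S in.ll
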
2025-03-07 08:56:44 +01:00

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -S -o - %s | FileCheck --check-prefix=OOB-STRICT %s
; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -mattr=+relaxed-buffer-oob-mode -S -o - %s | FileCheck --check-prefix=OOB-RELAXED %s

; The test checks that relaxed-buffer-oob-mode allows merging loads even when
; the merged load is not naturally aligned.
define amdgpu_kernel void @merge_align_4(ptr addrspace(7) captures(none) %p) {
; OOB-STRICT-LABEL: define amdgpu_kernel void @merge_align_4(
; OOB-STRICT-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
; OOB-STRICT-NEXT: [[ENTRY:.*:]]
; OOB-STRICT-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-STRICT-NEXT: [[LD_M8:%.*]] = load i32, ptr addrspace(7) [[GEP_M8]], align 4
; OOB-STRICT-NEXT: [[GEP_M4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -4
; OOB-STRICT-NEXT: [[LD_M4:%.*]] = load i32, ptr addrspace(7) [[GEP_M4]], align 4
; OOB-STRICT-NEXT: [[GEP_0:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 0
; OOB-STRICT-NEXT: [[LD_0:%.*]] = load i32, ptr addrspace(7) [[GEP_0]], align 4
; OOB-STRICT-NEXT: [[GEP_4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i64 4
; OOB-STRICT-NEXT: [[LD_4:%.*]] = load i32, ptr addrspace(7) [[GEP_4]], align 4
; OOB-STRICT-NEXT: ret void
;
; OOB-RELAXED-LABEL: define amdgpu_kernel void @merge_align_4(
; OOB-RELAXED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; OOB-RELAXED-NEXT: [[ENTRY:.*:]]
; OOB-RELAXED-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-RELAXED-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 4
; OOB-RELAXED-NEXT: [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
; OOB-RELAXED-NEXT: [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
; OOB-RELAXED-NEXT: [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
; OOB-RELAXED-NEXT: [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
; OOB-RELAXED-NEXT: ret void
;
entry:
%gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8
%ld_m8 = load i32, ptr addrspace(7) %gep_m8, align 4
%gep_m4 = getelementptr i8, ptr addrspace(7) %p, i32 -4
%ld_m4 = load i32, ptr addrspace(7) %gep_m4, align 4
%gep_0 = getelementptr i8, ptr addrspace(7) %p, i32 0
%ld_0 = load i32, ptr addrspace(7) %gep_0, align 4
%gep_4 = getelementptr i8, ptr addrspace(7) %p, i64 4
%ld_4 = load i32, ptr addrspace(7) %gep_4, align 4
ret void
}
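
; Note: under strict OOB handling the merged <4 x i32> load at offset -8 would
; only be align 4, below the 16-byte natural alignment of <4 x i32>, so
; OOB-STRICT keeps the four scalar loads while OOB-RELAXED merges them.
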
; The test checks that strict OOB mode (relaxed-buffer-oob-mode not set) still
; allows merging loads when the merged load is naturally aligned.
define amdgpu_kernel void @merge_align_16(ptr addrspace(7) captures(none) %p) {
; OOB-STRICT-LABEL: define amdgpu_kernel void @merge_align_16(
; OOB-STRICT-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
; OOB-STRICT-NEXT: [[ENTRY:.*:]]
; OOB-STRICT-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-STRICT-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
; OOB-STRICT-NEXT: [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
; OOB-STRICT-NEXT: [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
; OOB-STRICT-NEXT: [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
; OOB-STRICT-NEXT: [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
; OOB-STRICT-NEXT: ret void
;
; OOB-RELAXED-LABEL: define amdgpu_kernel void @merge_align_16(
; OOB-RELAXED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0]] {
; OOB-RELAXED-NEXT: [[ENTRY:.*:]]
; OOB-RELAXED-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-RELAXED-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
; OOB-RELAXED-NEXT: [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
; OOB-RELAXED-NEXT: [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
; OOB-RELAXED-NEXT: [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
; OOB-RELAXED-NEXT: [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
; OOB-RELAXED-NEXT: ret void
;
entry:
%gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8
%ld_m8 = load i32, ptr addrspace(7) %gep_m8, align 16
%gep_m4 = getelementptr i8, ptr addrspace(7) %p, i32 -4
%ld_m4 = load i32, ptr addrspace(7) %gep_m4, align 4
%gep_0 = getelementptr i8, ptr addrspace(7) %p, i32 0
%ld_0 = load i32, ptr addrspace(7) %gep_0, align 8
%gep_4 = getelementptr i8, ptr addrspace(7) %p, i64 4
%ld_4 = load i32, ptr addrspace(7) %gep_4, align 4
ret void
}