The idea behind this canonicalization is that it allows us to handle fewer patterns, because we know that some will be canonicalized away. This is indeed very useful, e.g. to know that constants are always on the right. However, this is only useful if the canonicalization is actually reliable. This is the case for constants, but not for arguments: moving these to the right makes it look like the "more complex" expression is guaranteed to be on the left, but this is not actually the case in practice. It fails as soon as you replace the argument with another instruction. The end result is that things look like they work correctly in tests, while they actually don't. We use the "thwart complexity-based canonicalization" trick to handle this in tests, but it's often a challenge for new contributors to get this right, and based on the regressions this PR originally exposed, we clearly don't get this right in many cases. For this reason, I think it's better to remove this complexity canonicalization. It will make it much easier to write tests for commuted cases and to make sure that they are handled.
122 lines
7.0 KiB
LLVM
122 lines
7.0 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
|
|
|
|
; Fold zeroing of inactive lanes into the load's passthrough parameter.
; The select's false value (zeroinitializer) replaces the undef passthrough.
define <4 x float> @masked_load_and_zero_inactive_1(ptr %ptr, <4 x i1> %mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_1(
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK:%.*]], <4 x float> zeroinitializer)
; CHECK-NEXT: ret <4 x float> [[LOAD]]
;
  %load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x float> undef)
  %masked = select <4 x i1> %mask, <4 x float> %load, <4 x float> zeroinitializer
  ret <4 x float> %masked
}
|
|
|
|
; As above but reuse the load's existing passthrough.
; The select is redundant because the passthrough is already zero.
define <4 x i32> @masked_load_and_zero_inactive_2(ptr %ptr, <4 x i1> %mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_2(
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK:%.*]], <4 x i32> zeroinitializer)
; CHECK-NEXT: ret <4 x i32> [[LOAD]]
;
  %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
  %masked = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> zeroinitializer
  ret <4 x i32> %masked
}
|
|
|
|
; No transform when the load's passthrough cannot be reused or altered.
; %passthrough is an arbitrary argument, so the select must remain.
define <4 x i32> @masked_load_and_zero_inactive_3(ptr %ptr, <4 x i1> %mask, <4 x i32> %passthrough) {
; CHECK-LABEL: @masked_load_and_zero_inactive_3(
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK:%.*]], <4 x i32> [[PASSTHROUGH:%.*]])
; CHECK-NEXT: [[MASKED:%.*]] = select <4 x i1> [[MASK]], <4 x i32> [[LOAD]], <4 x i32> zeroinitializer
; CHECK-NEXT: ret <4 x i32> [[MASKED]]
;
  %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
  %masked = select <4 x i1> %mask, <4 x i32> %load, <4 x i32> zeroinitializer
  ret <4 x i32> %masked
}
|
|
|
|
; Remove redundant select when its mask doesn't overlap with the load mask.
; The select zeroes exactly the load's inactive lanes, so it folds into the
; (undef) passthrough.
define <4 x i32> @masked_load_and_zero_inactive_4(ptr %ptr, <4 x i1> %inv_mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_4(
; CHECK-NEXT: [[MASK:%.*]] = xor <4 x i1> [[INV_MASK:%.*]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: ret <4 x i32> [[LOAD]]
;
  %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
  %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> undef)
  %masked = select <4 x i1> %inv_mask, <4 x i32> zeroinitializer, <4 x i32> %load
  ret <4 x i32> %masked
}
|
|
|
|
; As above but reuse the load's existing passthrough.
; The select is redundant: the zero passthrough already covers inactive lanes.
define <4 x i32> @masked_load_and_zero_inactive_5(ptr %ptr, <4 x i1> %inv_mask) {
; CHECK-LABEL: @masked_load_and_zero_inactive_5(
; CHECK-NEXT: [[MASK:%.*]] = xor <4 x i1> [[INV_MASK:%.*]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK]], <4 x i32> zeroinitializer)
; CHECK-NEXT: ret <4 x i32> [[LOAD]]
;
  %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
  %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> zeroinitializer)
  %masked = select <4 x i1> %inv_mask, <4 x i32> zeroinitializer, <4 x i32> %load
  ret <4 x i32> %masked
}
|
|
|
|
; No transform when the load's passthrough cannot be reused or altered.
; %passthrough is an arbitrary argument, so the select must remain.
define <4 x i32> @masked_load_and_zero_inactive_6(ptr %ptr, <4 x i1> %inv_mask, <4 x i32> %passthrough) {
; CHECK-LABEL: @masked_load_and_zero_inactive_6(
; CHECK-NEXT: [[MASK:%.*]] = xor <4 x i1> [[INV_MASK:%.*]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK]], <4 x i32> [[PASSTHROUGH:%.*]])
; CHECK-NEXT: [[MASKED:%.*]] = select <4 x i1> [[INV_MASK]], <4 x i32> zeroinitializer, <4 x i32> [[LOAD]]
; CHECK-NEXT: ret <4 x i32> [[MASKED]]
;
  %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
  %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
  %masked = select <4 x i1> %inv_mask, <4 x i32> zeroinitializer, <4 x i32> %load
  ret <4 x i32> %masked
}
|
|
|
|
; No transform when select and load masks have no relation.
; %mask2 is independent of %mask1, so nothing can be folded.
define <4 x i32> @masked_load_and_zero_inactive_7(ptr %ptr, <4 x i1> %mask1, <4 x i1> %mask2) {
; CHECK-LABEL: @masked_load_and_zero_inactive_7(
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK1:%.*]], <4 x i32> zeroinitializer)
; CHECK-NEXT: [[MASKED:%.*]] = select <4 x i1> [[MASK2:%.*]], <4 x i32> zeroinitializer, <4 x i32> [[LOAD]]
; CHECK-NEXT: ret <4 x i32> [[MASKED]]
;
  %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask1, <4 x i32> zeroinitializer)
  %masked = select <4 x i1> %mask2, <4 x i32> zeroinitializer, <4 x i32> %load
  ret <4 x i32> %masked
}
|
|
|
|
; A more complex case where we can prove the select mask is a subset of the
; load's inactive lanes and thus the load's passthrough takes effect.
; %pg = %mask & %cond, so lanes selected by %inv_mask are inactive in the load.
define <4 x float> @masked_load_and_zero_inactive_8(ptr %ptr, <4 x i1> %inv_mask, <4 x i1> %cond) {
; CHECK-LABEL: @masked_load_and_zero_inactive_8(
; CHECK-NEXT: [[MASK:%.*]] = xor <4 x i1> [[INV_MASK:%.*]], <i1 true, i1 true, i1 true, i1 true>
; CHECK-NEXT: [[PG:%.*]] = and <4 x i1> [[COND:%.*]], [[MASK]]
; CHECK-NEXT: [[LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[PG]], <4 x float> zeroinitializer)
; CHECK-NEXT: ret <4 x float> [[LOAD]]
;
  %mask = xor <4 x i1> %inv_mask, <i1 true, i1 true, i1 true, i1 true>
  %pg = and <4 x i1> %mask, %cond
  %load = call <4 x float> @llvm.masked.load.v4f32.p0(ptr %ptr, i32 4, <4 x i1> %pg, <4 x float> undef)
  %masked = select <4 x i1> %inv_mask, <4 x float> zeroinitializer, <4 x float> %load
  ret <4 x float> %masked
}
|
|
|
|
; A scalar (i1) select condition has no lane-wise relation to the load mask,
; so no folding is possible; both instructions must survive.
define <8 x float> @masked_load_and_scalar_select_cond(ptr %ptr, <8 x i1> %mask, i1 %cond) {
; CHECK-LABEL: @masked_load_and_scalar_select_cond(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[PTR:%.*]], i32 32, <8 x i1> [[MASK:%.*]], <8 x float> undef)
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[COND:%.*]], <8 x float> zeroinitializer, <8 x float> [[TMP0]]
; CHECK-NEXT: ret <8 x float> [[TMP1]]
;
entry:
  %0 = call <8 x float> @llvm.masked.load.v8f32.p0(ptr %ptr, i32 32, <8 x i1> %mask, <8 x float> undef)
  %1 = select i1 %cond, <8 x float> zeroinitializer, <8 x float> %0
  ret <8 x float> %1
}
|
|
|
|
; Intrinsic declarations for the masked-load variants used above.
declare <8 x float> @llvm.masked.load.v8f32.p0(ptr, i32 immarg, <8 x i1>, <8 x float>)
declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
|