Commit 63468081 by Nicolas Capens Committed by Nicolas Capens

Refactor binary group operations

Instead of deducing the binary operation type from the identity value, which used a type alias, just specify it explicitly. This enables passing the initialization value used as the identity, instead of necessarily an identity value vector itself, as well as omitting the type alias. In turn this allowed omitting the scope brackets, resulting in a significant code compaction without compromising readability. Bug: b/142002682 Change-Id: I23d6d984bbfdf47af72108adbc942992007fc3c0 Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/40168 Presubmit-Ready: Nicolas Capens <nicolascapens@google.com> Reviewed-by: Ben Clayton <bclayton@google.com> Tested-by: Nicolas Capens <nicolascapens@google.com>
parent e5720880
...@@ -20,18 +20,18 @@ namespace sw { ...@@ -20,18 +20,18 @@ namespace sw {
struct SpirvShader::Impl::Group struct SpirvShader::Impl::Group
{ {
// Template function to perform a binary operation. // Template function to perform a binary operation.
// |TYPE| should be the type of the identity value (as an SIMD::<Type>). // |TYPE| should be the type of the binary operation (as a SIMD::<ScalarType>).
// |I| should be a type suitable to initialize the identity value.
// |APPLY| should be a callable object that takes two RValue<TYPE> parameters // |APPLY| should be a callable object that takes two RValue<TYPE> parameters
// and returns a new RValue<TYPE> corresponding to the operation's result. // and returns a new RValue<TYPE> corresponding to the operation's result.
template<typename TYPE, typename APPLY> template<typename TYPE, typename I, typename APPLY>
static void BinaryOperation( static void BinaryOperation(
const SpirvShader *shader, const SpirvShader *shader,
const SpirvShader::InsnIterator &insn, const SpirvShader::InsnIterator &insn,
const SpirvShader::EmitState *state, const SpirvShader::EmitState *state,
Intermediate &dst, Intermediate &dst,
const TYPE &identity, const I identityValue,
APPLY &&apply) APPLY &&apply)
{ {
SpirvShader::GenericValue value(shader, state, insn.word(5)); SpirvShader::GenericValue value(shader, state, insn.word(5));
...@@ -39,6 +39,7 @@ struct SpirvShader::Impl::Group ...@@ -39,6 +39,7 @@ struct SpirvShader::Impl::Group
for(auto i = 0u; i < type.sizeInComponents; i++) for(auto i = 0u; i < type.sizeInComponents; i++)
{ {
auto mask = As<SIMD::UInt>(state->activeLaneMask()); auto mask = As<SIMD::UInt>(state->activeLaneMask());
auto identity = TYPE(identityValue);
SIMD::UInt v_uint = (value.UInt(i) & mask) | (As<SIMD::UInt>(identity) & ~mask); SIMD::UInt v_uint = (value.UInt(i) & mask) | (As<SIMD::UInt>(identity) & ~mask);
TYPE v = As<TYPE>(v_uint); TYPE v = As<TYPE>(v_uint);
switch(spv::GroupOperation(insn.word(4))) switch(spv::GroupOperation(insn.word(4)))
...@@ -316,174 +317,109 @@ SpirvShader::EmitResult SpirvShader::EmitGroupNonUniform(InsnIterator insn, Emit ...@@ -316,174 +317,109 @@ SpirvShader::EmitResult SpirvShader::EmitGroupNonUniform(InsnIterator insn, Emit
} }
case spv::OpGroupNonUniformIAdd: case spv::OpGroupNonUniformIAdd:
{ Impl::Group::BinaryOperation<SIMD::Int>(
using Type = SIMD::Int; this, insn, state, dst, 0,
Impl::Group::BinaryOperation( [](auto a, auto b) { return a + b; });
this, insn, state, dst,
Type(0),
[](RValue<Type> a, RValue<Type> b) { return a + b; });
break; break;
}
case spv::OpGroupNonUniformFAdd: case spv::OpGroupNonUniformFAdd:
{ Impl::Group::BinaryOperation<SIMD::Float>(
using Type = SIMD::Float; this, insn, state, dst, 0.0f,
Impl::Group::BinaryOperation( [](auto a, auto b) { return a + b; });
this, insn, state, dst,
Type(0.),
[](RValue<Type> a, RValue<Type> b) { return a + b; });
break; break;
}
case spv::OpGroupNonUniformIMul: case spv::OpGroupNonUniformIMul:
{ Impl::Group::BinaryOperation<SIMD::Int>(
using Type = SIMD::Int; this, insn, state, dst, 1,
Impl::Group::BinaryOperation( [](auto a, auto b) { return a * b; });
this, insn, state, dst,
Type(1),
[](RValue<Type> a, RValue<Type> b) { return a * b; });
break; break;
}
case spv::OpGroupNonUniformFMul: case spv::OpGroupNonUniformFMul:
{ Impl::Group::BinaryOperation<SIMD::Float>(
using Type = SIMD::Float; this, insn, state, dst, 1.0f,
Impl::Group::BinaryOperation( [](auto a, auto b) { return a * b; });
this, insn, state, dst,
Type(1.),
[](RValue<Type> a, RValue<Type> b) { return a * b; });
break; break;
}
case spv::OpGroupNonUniformBitwiseAnd: case spv::OpGroupNonUniformBitwiseAnd:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, ~0u,
Impl::Group::BinaryOperation( [](auto a, auto b) { return a & b; });
this, insn, state, dst,
Type(~0u),
[](RValue<Type> a, RValue<Type> b) { return a & b; });
break; break;
}
case spv::OpGroupNonUniformBitwiseOr: case spv::OpGroupNonUniformBitwiseOr:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, 0,
Impl::Group::BinaryOperation( [](auto a, auto b) { return a | b; });
this, insn, state, dst,
Type(0),
[](RValue<Type> a, RValue<Type> b) { return a | b; });
break; break;
}
case spv::OpGroupNonUniformBitwiseXor: case spv::OpGroupNonUniformBitwiseXor:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, 0,
Impl::Group::BinaryOperation( [](auto a, auto b) { return a ^ b; });
this, insn, state, dst,
Type(0),
[](RValue<Type> a, RValue<Type> b) { return a ^ b; });
break; break;
}
case spv::OpGroupNonUniformSMin: case spv::OpGroupNonUniformSMin:
{ Impl::Group::BinaryOperation<SIMD::Int>(
using Type = SIMD::Int; this, insn, state, dst, INT32_MAX,
Impl::Group::BinaryOperation( [](auto a, auto b) { return Min(a, b); });
this, insn, state, dst,
Type(INT32_MAX),
[](RValue<Type> a, RValue<Type> b) { return Min(a, b); });
break; break;
}
case spv::OpGroupNonUniformUMin: case spv::OpGroupNonUniformUMin:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, ~0u,
Impl::Group::BinaryOperation( [](auto a, auto b) { return Min(a, b); });
this, insn, state, dst,
Type(~0u),
[](RValue<Type> a, RValue<Type> b) { return Min(a, b); });
break; break;
}
case spv::OpGroupNonUniformFMin: case spv::OpGroupNonUniformFMin:
{ Impl::Group::BinaryOperation<SIMD::Float>(
using Type = SIMD::Float; this, insn, state, dst, SIMD::Float::infinity(),
Impl::Group::BinaryOperation( [](auto a, auto b) { return NMin(a, b); });
this, insn, state, dst,
Type::infinity(),
[](RValue<Type> a, RValue<Type> b) { return NMin(a, b); });
break; break;
}
case spv::OpGroupNonUniformSMax: case spv::OpGroupNonUniformSMax:
{ Impl::Group::BinaryOperation<SIMD::Int>(
using Type = SIMD::Int; this, insn, state, dst, INT32_MIN,
Impl::Group::BinaryOperation( [](auto a, auto b) { return Max(a, b); });
this, insn, state, dst,
Type(INT32_MIN),
[](RValue<Type> a, RValue<Type> b) { return Max(a, b); });
break; break;
}
case spv::OpGroupNonUniformUMax: case spv::OpGroupNonUniformUMax:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, 0,
Impl::Group::BinaryOperation( [](auto a, auto b) { return Max(a, b); });
this, insn, state, dst,
Type(0),
[](RValue<Type> a, RValue<Type> b) { return Max(a, b); });
break; break;
}
case spv::OpGroupNonUniformFMax: case spv::OpGroupNonUniformFMax:
{ Impl::Group::BinaryOperation<SIMD::Float>(
using Type = SIMD::Float; this, insn, state, dst, -SIMD::Float::infinity(),
SIMD::Float negative_inf = -SIMD::Float::infinity(); [](auto a, auto b) { return NMax(a, b); });
Impl::Group::BinaryOperation(
this, insn, state, dst,
negative_inf,
[](RValue<Type> a, RValue<Type> b) { return NMax(a, b); });
break; break;
}
case spv::OpGroupNonUniformLogicalAnd: case spv::OpGroupNonUniformLogicalAnd:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, ~0u,
Impl::Group::BinaryOperation( [](auto a, auto b) {
this, insn, state, dst,
Type(~0u),
[](RValue<Type> a, RValue<Type> b) {
SIMD::UInt zero = SIMD::UInt(0); SIMD::UInt zero = SIMD::UInt(0);
return CmpNEQ(a, zero) & CmpNEQ(b, zero); return CmpNEQ(a, zero) & CmpNEQ(b, zero);
}); });
break; break;
}
case spv::OpGroupNonUniformLogicalOr: case spv::OpGroupNonUniformLogicalOr:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, 0,
Impl::Group::BinaryOperation( [](auto a, auto b) {
this, insn, state, dst,
Type(0),
[](RValue<Type> a, RValue<Type> b) {
SIMD::UInt zero = SIMD::UInt(0); SIMD::UInt zero = SIMD::UInt(0);
return CmpNEQ(a, zero) | CmpNEQ(b, zero); return CmpNEQ(a, zero) | CmpNEQ(b, zero);
}); });
break; break;
}
case spv::OpGroupNonUniformLogicalXor: case spv::OpGroupNonUniformLogicalXor:
{ Impl::Group::BinaryOperation<SIMD::UInt>(
using Type = SIMD::UInt; this, insn, state, dst, 0,
Impl::Group::BinaryOperation( [](auto a, auto b) {
this, insn, state, dst,
Type(0),
[](RValue<Type> a, RValue<Type> b) {
SIMD::UInt zero = SIMD::UInt(0); SIMD::UInt zero = SIMD::UInt(0);
return CmpNEQ(a, zero) ^ CmpNEQ(b, zero); return CmpNEQ(a, zero) ^ CmpNEQ(b, zero);
}); });
break; break;
}
default: default:
UNIMPLEMENTED("EmitGroupNonUniform op: %s", OpcodeName(type.opcode()).c_str()); UNIMPLEMENTED("EmitGroupNonUniform op: %s", OpcodeName(type.opcode()).c_str());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment