Commit 9e4bc1ba by Ben Clayton

SpirvShader: Refactor loads and stores. Consider limits.

This change moves all calls to rr::Load() and rr::Store() to two new functions: SIMD::Load() and SIMD::Store(). This attempts to consolidate the SIMD memory ops into reusable functions, while also adding bounds checking on the accesses. The additional branches hurt the JIT codegen performance. This will be resolved with a future change. Tests: dEQP-VK.robustness.* Bug: b/131224163 Change-Id: I3a392a1f4f5366fa5134c081e0a2479575f92d80 Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/29334 Reviewed-by: Chris Forbes <chrisforbes@google.com> Tested-by: Ben Clayton <bclayton@google.com> Presubmit-Ready: Ben Clayton <bclayton@google.com> Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
parent 4d1f8d05
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "Vulkan/VkDebug.hpp" #include "Vulkan/VkDebug.hpp"
#include "Vulkan/VkConfig.h" #include "Vulkan/VkConfig.h"
#include "Vulkan/VkDescriptorSet.hpp" #include "Vulkan/VkDescriptorSet.hpp"
#include "Common/Types.hpp"
#include "Device/Config.hpp" #include "Device/Config.hpp"
#include <spirv/unified1/spirv.hpp> #include <spirv/unified1/spirv.hpp>
...@@ -65,21 +66,122 @@ namespace sw ...@@ -65,21 +66,122 @@ namespace sw
struct Pointer struct Pointer
{ {
Pointer(rr::Pointer<Byte> base) : base(base), offset(0), uniform(true) {} Pointer(rr::Pointer<Byte> base, rr::Int limit)
Pointer(rr::Pointer<Byte> base, SIMD::Int offset) : base(base), offset(offset), uniform(false) {} : base(base), limit(limit), dynamicOffsets(0), staticOffsets{}, hasDynamicOffsets(false) {}
Pointer(rr::Pointer<Byte> base, rr::Int limit, SIMD::Int offset)
: base(base), limit(limit), dynamicOffsets(offset), staticOffsets{}, hasDynamicOffsets(false) {}
inline void addOffset(Int delta) { offset += delta; uniform = false; } inline Pointer& operator += (Int i)
{
dynamicOffsets += i;
hasDynamicOffsets = true;
return *this;
}
// Scales every lane's offset by the per-lane factor i.
// Scaling applies to the combined (static + dynamic) offsets, so the
// result is folded entirely into the dynamic component and the static
// per-lane offsets are cleared.
inline Pointer& operator *= (Int i)
{
	auto combined = offsets();
	dynamicOffsets = combined * i;
	staticOffsets = {};
	hasDynamicOffsets = true;
	return *this;
}
// Non-mutating forms: copy this pointer, then reuse the compound
// operators on the copy.
inline Pointer operator + (SIMD::Int i)
{
	Pointer result = *this;
	result += i;
	return result;
}
inline Pointer operator * (SIMD::Int i)
{
	Pointer result = *this;
	result *= i;
	return result;
}
// Adds a compile-time-constant byte delta to every lane.
// Only the static per-lane offsets need to move; the dynamic offsets
// are left untouched.
inline Pointer& operator += (int i)
{
	for (auto &ofs : staticOffsets) { ofs += i; }
	return *this;
}
// Scales every lane's offset by a compile-time-constant factor.
// Both offset components are scaled; the dynamic component is only
// touched when it is known to be non-zero.
inline Pointer& operator *= (int i)
{
	for (auto &ofs : staticOffsets) { ofs *= i; }
	if (hasDynamicOffsets)
	{
		dynamicOffsets *= SIMD::Int(i);
	}
	return *this;
}
// Non-mutating constant forms: copy this pointer, then reuse the
// compound operators on the copy.
inline Pointer operator + (int i)
{
	Pointer result = *this;
	result += i;
	return result;
}
inline Pointer operator * (int i)
{
	Pointer result = *this;
	result *= i;
	return result;
}
// Returns the total per-lane byte offsets from base: the run-time
// (dynamic) offsets plus the compile-time (static) per-lane offsets.
inline SIMD::Int offsets() const
{
	// The 4-argument SIMD::Int constructor below assumes exactly 4 lanes.
	static_assert(SIMD::Width == 4, "Expects SIMD::Width to be 4");
	return dynamicOffsets + SIMD::Int(staticOffsets[0], staticOffsets[1], staticOffsets[2], staticOffsets[3]);
}
// Returns true if all offsets are sequential (N+0, N+1, N+2, N+3)
inline rr::Bool hasSequentialOffsets() const
{
	if (hasDynamicOffsets)
	{
		auto o = offsets();
		static_assert(SIMD::Width == 4, "Expects SIMD::Width to be 4");
		// o.yzww shifts each lane down by one (with the last lane
		// duplicated), so sequential offsets require each shifted lane
		// to equal its predecessor plus exactly 1 - i.e. a delta of
		// (1, 1, 1, 0); the final lane compares o.w with o.w + 0,
		// which is trivially true. The previous constant (1, 2, 3, 0)
		// tested o.z == o.y + 2 and o.w == o.z + 3, rejecting genuinely
		// sequential offsets and disagreeing with the static path below.
		// SignMask of the negated comparison is 0 only when every lane
		// compared equal.
		return rr::SignMask(~CmpEQ(o.yzww, o + SIMD::Int(1, 1, 1, 0))) == 0;
	}
	else
	{
		// Static offsets are known at JIT time: check adjacency directly.
		for (int i = 1; i < SIMD::Width; i++)
		{
			if (staticOffsets[i-1] + 1 != staticOffsets[i]) { return false; }
		}
		return true;
	}
}
// Returns true if all offsets are equal (N, N, N, N)
inline rr::Bool hasEqualOffsets() const
{
	if (hasDynamicOffsets)
	{
		auto o = offsets();
		static_assert(SIMD::Width == 4, "Expects SIMD::Width to be 4");
		// Comparing the vector against a rotation of itself (o.yzwx)
		// is true in every lane only when all four lanes hold the same
		// value; SignMask of the negated comparison is then 0.
		return rr::SignMask(~CmpEQ(o, o.yzwx)) == 0;
	}
	else
	{
		// Static offsets are known at JIT time: every lane must match
		// the first lane's offset.
		for (int i = 1; i < SIMD::Width; i++)
		{
			if (staticOffsets[i] != staticOffsets[0]) { return false; }
		}
		return true;
	}
}
// Base address for the pointer, common across all lanes. // Base address for the pointer, common across all lanes.
rr::Pointer<rr::Byte> base; rr::Pointer<rr::Byte> base;
// Per lane offsets from base in bytes. // Upper (non-inclusive) limit for offsets from base.
// If uniform is true, all offsets are considered zero. rr::Int limit;
Int offset;
// Per lane offsets from base.
SIMD::Int dynamicOffsets; // If hasDynamicOffsets is false, all dynamicOffsets are zero.
std::array<int32_t, SIMD::Width> staticOffsets;
// True if all offsets are zero. // True if all dynamicOffsets are zero.
bool uniform; bool hasDynamicOffsets;
}; };
// Element maps a SIMD vector type to its per-lane scalar type
// (e.g. SIMD::Float -> rr::Float). Unspecialized use is a
// compile-time error, restricting Load/Store to the types below.
template <typename T> struct Element {};
template <> struct Element<Float> { using type = rr::Float; };
template <> struct Element<Int> { using type = rr::Int; };
template <> struct Element<UInt> { using type = rr::UInt; };

// Masked, bounds-checked SIMD store of val through ptr.
// Only lanes enabled in mask are written. When atomic is true, the
// per-lane accesses are performed with the given memory order.
// (Definition elsewhere; declaration only.)
template<typename T>
void Store(Pointer ptr, T val, Int mask, bool atomic = false, std::memory_order order = std::memory_order_relaxed);

// RValue convenience overload: materializes the value and forwards to
// the primary Store() above.
template<typename T>
void Store(Pointer ptr, RValue<T> val, Int mask, bool atomic = false, std::memory_order order = std::memory_order_relaxed)
{
	Store(ptr, T(val), mask, atomic, order);
}

// Masked, bounds-checked SIMD load through ptr; lanes disabled in mask
// are not read. When atomic is true, the per-lane accesses use the
// given memory order. (Definition elsewhere; declaration only.)
template<typename T>
T Load(Pointer ptr, Int mask, bool atomic = false, std::memory_order order = std::memory_order_relaxed);
} }
// Incrementally constructed complex bundle of rvalues // Incrementally constructed complex bundle of rvalues
...@@ -736,7 +838,7 @@ namespace sw ...@@ -736,7 +838,7 @@ namespace sw
EmitResult EmitAtomicOp(InsnIterator insn, EmitState *state) const; EmitResult EmitAtomicOp(InsnIterator insn, EmitState *state) const;
EmitResult EmitAtomicCompareExchange(InsnIterator insn, EmitState *state) const; EmitResult EmitAtomicCompareExchange(InsnIterator insn, EmitState *state) const;
SIMD::Int GetTexelOffset(GenericValue const & coordinate, Type const & imageType, Pointer<Byte> descriptor, int texelSize) const; SIMD::Pointer GetTexelAddress(SIMD::Pointer base, GenericValue const & coordinate, Type const & imageType, Pointer<Byte> descriptor, int texelSize) const;
// OpcodeName() returns the name of the opcode op. // OpcodeName() returns the name of the opcode op.
// If NDEBUG is defined, then OpcodeName() will only return the numerical code. // If NDEBUG is defined, then OpcodeName() will only return the numerical code.
......
...@@ -22,6 +22,7 @@ namespace vk ...@@ -22,6 +22,7 @@ namespace vk
{ {
const int Buffer::DataOffset = static_cast<int>(offsetof(Buffer, memory)); const int Buffer::DataOffset = static_cast<int>(offsetof(Buffer, memory));
const int Buffer::DataSize = static_cast<int>(offsetof(Buffer, size));
Buffer::Buffer(const VkBufferCreateInfo* pCreateInfo, void* mem) : Buffer::Buffer(const VkBufferCreateInfo* pCreateInfo, void* mem) :
flags(pCreateInfo->flags), size(pCreateInfo->size), usage(pCreateInfo->usage), flags(pCreateInfo->flags), size(pCreateInfo->size), usage(pCreateInfo->usage),
......
...@@ -43,6 +43,7 @@ public: ...@@ -43,6 +43,7 @@ public:
// DataOffset is the offset in bytes from the Buffer to the pointer to the // DataOffset is the offset in bytes from the Buffer to the pointer to the
// buffer's data memory. // buffer's data memory.
static const int DataOffset; static const int DataOffset;
static const int DataSize;
private: private:
void* memory = nullptr; void* memory = nullptr;
......
...@@ -28,7 +28,7 @@ BufferView::BufferView(const VkBufferViewCreateInfo* pCreateInfo, void* mem) : ...@@ -28,7 +28,7 @@ BufferView::BufferView(const VkBufferViewCreateInfo* pCreateInfo, void* mem) :
} }
else else
{ {
range = pCreateInfo->range - offset; range = pCreateInfo->range;
} }
} }
......
...@@ -34,6 +34,7 @@ public: ...@@ -34,6 +34,7 @@ public:
void *getPointer() const; void *getPointer() const;
uint32_t getElementCount() const { return range / Format(format).bytes(); } uint32_t getElementCount() const { return range / Format(format).bytes(); }
uint32_t getRangeInBytes() const { return range; }
private: private:
VkBuffer buffer; VkBuffer buffer;
......
...@@ -429,6 +429,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor ...@@ -429,6 +429,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor
? imageView->layerPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT) ? imageView->layerPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT)
: imageView->slicePitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0); : imageView->slicePitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0);
descriptor[i].arrayLayers = imageView->getSubresourceRange().layerCount; descriptor[i].arrayLayers = imageView->getSubresourceRange().layerCount;
descriptor[i].sizeInBytes = imageView->getImageSizeInBytes();
} }
} }
else if (entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) else if (entry.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
...@@ -443,6 +444,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor ...@@ -443,6 +444,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor
descriptor[i].rowPitchBytes = 0; descriptor[i].rowPitchBytes = 0;
descriptor[i].slicePitchBytes = 0; descriptor[i].slicePitchBytes = 0;
descriptor[i].arrayLayers = 1; descriptor[i].arrayLayers = 1;
descriptor[i].sizeInBytes = bufferView->getRangeInBytes();
} }
} }
else else
......
...@@ -43,6 +43,7 @@ struct StorageImageDescriptor ...@@ -43,6 +43,7 @@ struct StorageImageDescriptor
int rowPitchBytes; int rowPitchBytes;
int slicePitchBytes; int slicePitchBytes;
int arrayLayers; int arrayLayers;
int sizeInBytes;
}; };
class DescriptorSetLayout : public Object<DescriptorSetLayout, VkDescriptorSetLayout> class DescriptorSetLayout : public Object<DescriptorSetLayout, VkDescriptorSetLayout>
......
...@@ -50,6 +50,7 @@ public: ...@@ -50,6 +50,7 @@ public:
const VkComponentMapping &getComponentMapping() const { return components; } const VkComponentMapping &getComponentMapping() const { return components; }
const VkImageSubresourceRange &getSubresourceRange() const { return subresourceRange; } const VkImageSubresourceRange &getSubresourceRange() const { return subresourceRange; }
const size_t getImageSizeInBytes() const { return image->getMemoryRequirements().size; }
private: private:
bool imageTypesMatch(VkImageType imageType) const; bool imageTypesMatch(VkImageType imageType) const;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment