Commit 2d77aea5 by Alexis Hetu Committed by Alexis Hétu

Make VkNonDispatchableHandle POD

Modified VkNonDispatchableHandle so that it's a plain old data type. By making sure VkNonDispatchableHandle is POD, it may benefit from the same rules and register usage as the default object handles. Bug: b/129979580 Change-Id: I8aea419df8a6ee2ff95717424f3344e1c317fb28 Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/32989 Tested-by: Alexis Hétu <sugoi@google.com> Presubmit-Ready: Alexis Hétu <sugoi@google.com> Kokoro-Presubmit: kokoro <noreply+kokoro@google.com> Reviewed-by: Nicolas Capens <nicolascapens@google.com>
parent ef44b440
......@@ -379,7 +379,7 @@ namespace sw
{
ASSERT(insns.size() > 0);
if (renderPass != VK_NULL_HANDLE)
if (renderPass)
{
// capture formats of any input attachments present
auto subpass = renderPass->getSubpass(subpassIndex);
......
......@@ -23,24 +23,14 @@
namespace
{
	// Converts a pointer into the descriptor pool's backing memory into the
	// opaque VkDescriptorSet handle handed back through the Vulkan API.
	// Descriptor sets are suballocated in place, so the handle simply wraps
	// the address of the vk::DescriptorSet living at `memory`.
	inline VkDescriptorSet asDescriptorSet(uint8_t* memory)
	{
		return vk::TtoVkT<vk::DescriptorSet, VkDescriptorSet>(reinterpret_cast<vk::DescriptorSet*>(memory));
	}

	// Inverse of asDescriptorSet: recovers the raw pool-memory pointer that
	// backs a VkDescriptorSet handle, for use in the pool's free-list math.
	inline uint8_t* asMemory(VkDescriptorSet descriptorSet)
	{
		return reinterpret_cast<uint8_t*>(vk::Cast(descriptorSet));
	}
}  // anonymous namespace
......@@ -49,14 +39,14 @@ namespace vk
{
// Constructs a descriptor pool over a single caller-provided allocation.
// `mem` is the backing storage from which all descriptor sets are
// suballocated; `poolSize` records its total size for free-space tracking.
DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem) :
	pool(static_cast<uint8_t*>(mem)),
	poolSize(ComputeRequiredAllocationSize(pCreateInfo))
{
}
void DescriptorPool::destroy(const VkAllocationCallbacks* pAllocator)
{
	// Release the pool's single backing allocation. Individual descriptor
	// sets are suballocated from it, so no per-set deallocation is needed.
	vk::deallocate(pool, pAllocator);
}
size_t DescriptorPool::ComputeRequiredAllocationSize(const VkDescriptorPoolCreateInfo* pCreateInfo)
......@@ -93,7 +83,7 @@ VkResult DescriptorPool::allocateSets(uint32_t descriptorSetCount, const VkDescr
return result;
}
VkDescriptorSet DescriptorPool::findAvailableMemory(size_t size)
uint8_t* DescriptorPool::findAvailableMemory(size_t size)
{
if(nodes.empty())
{
......@@ -124,7 +114,7 @@ VkDescriptorSet DescriptorPool::findAvailableMemory(size_t size)
++nextIt;
for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt)
{
VkDescriptorSet freeSpaceStart(it->set + it->size);
uint8_t* freeSpaceStart = it->set + it->size;
freeSpace = nextIt->set - freeSpaceStart;
if(freeSpace >= size)
{
......@@ -132,7 +122,7 @@ VkDescriptorSet DescriptorPool::findAvailableMemory(size_t size)
}
}
return VK_NULL_HANDLE;
return nullptr;
}
VkResult DescriptorPool::allocateSets(size_t* sizes, uint32_t numAllocs, VkDescriptorSet* pDescriptorSets)
......@@ -150,13 +140,13 @@ VkResult DescriptorPool::allocateSets(size_t* sizes, uint32_t numAllocs, VkDescr
// Attempt to allocate single chunk of memory
{
VkDescriptorSet memory = findAvailableMemory(totalSize);
if(memory != VK_NULL_HANDLE)
uint8_t* memory = findAvailableMemory(totalSize);
if(memory)
{
for(uint32_t i = 0; i < numAllocs; i++)
{
pDescriptorSets[i] = memory;
nodes.insert(Node(pDescriptorSets[i], sizes[i]));
pDescriptorSets[i] = asDescriptorSet(memory);
nodes.insert(Node(memory, sizes[i]));
memory += sizes[i];
}
......@@ -167,8 +157,12 @@ VkResult DescriptorPool::allocateSets(size_t* sizes, uint32_t numAllocs, VkDescr
// Atttempt to allocate each descriptor set separately
for(uint32_t i = 0; i < numAllocs; i++)
{
pDescriptorSets[i] = findAvailableMemory(sizes[i]);
if(pDescriptorSets[i] == VK_NULL_HANDLE)
uint8_t* memory = findAvailableMemory(sizes[i]);
if(memory)
{
pDescriptorSets[i] = asDescriptorSet(memory);
}
else
{
// vkAllocateDescriptorSets can be used to create multiple descriptor sets. If the
// creation of any of those descriptor sets fails, then the implementation must
......@@ -181,7 +175,7 @@ VkResult DescriptorPool::allocateSets(size_t* sizes, uint32_t numAllocs, VkDescr
}
return (computeTotalFreeSize() > totalSize) ? VK_ERROR_FRAGMENTED_POOL : VK_ERROR_OUT_OF_POOL_MEMORY;
}
nodes.insert(Node(pDescriptorSets[i], sizes[i]));
nodes.insert(Node(memory, sizes[i]));
}
return VK_SUCCESS;
......@@ -198,7 +192,7 @@ void DescriptorPool::freeSets(uint32_t descriptorSetCount, const VkDescriptorSet
void DescriptorPool::freeSet(const VkDescriptorSet descriptorSet)
{
const auto itEnd = nodes.end();
auto it = std::find(nodes.begin(), itEnd, descriptorSet);
auto it = std::find(nodes.begin(), itEnd, asMemory(descriptorSet));
if(it != itEnd)
{
nodes.erase(it);
......
......@@ -34,22 +34,22 @@ namespace vk
private:
VkResult allocateSets(size_t* sizes, uint32_t numAllocs, VkDescriptorSet* pDescriptorSets);
VkDescriptorSet findAvailableMemory(size_t size);
uint8_t* findAvailableMemory(size_t size);
void freeSet(const VkDescriptorSet descriptorSet);
size_t computeTotalFreeSize() const;
// Bookkeeping record for one suballocation inside the pool: the start of
// the descriptor set's memory and its size in bytes. Nodes are ordered by
// address so neighbouring entries bound the free gaps between allocations.
struct Node
{
	Node(uint8_t* set, size_t size) : set(set), size(size) {}
	bool operator<(const Node& node) const { return set < node.set; }
	// Allows std::find over the node set using a raw memory pointer as key.
	bool operator==(const uint8_t* other) const { return set == other; }

	uint8_t* set = nullptr;  // start of this set's storage within `pool`
	size_t size = 0;         // bytes occupied by this set
};
std::set<Node> nodes;
VkDescriptorSet pool = VK_NULL_HANDLE;
uint8_t* pool = nullptr;
size_t poolSize = 0;
};
......
......@@ -34,7 +34,7 @@ namespace vk
public:
// Unwraps a VkDescriptorSet handle to the DescriptorSet object it refers
// to. The handle's operator void*() exposes the raw address it stores;
// the DescriptorSet lives directly in pool memory at that address.
static inline DescriptorSet* Cast(VkDescriptorSet object)
{
	return static_cast<DescriptorSet*>(static_cast<void*>(object));
}
using Bindings = std::array<vk::DescriptorSet*, vk::MAX_BOUND_DESCRIPTOR_SETS>;
......
......@@ -331,7 +331,7 @@ PFN_vkVoidFunction GetInstanceProcAddr(Instance* instance, const char* pName)
return globalFunction->second;
}
if(instance != nullptr)
if(instance)
{
auto instanceFunction = instanceFunctionPointers.find(std::string(pName));
if(instanceFunction != instanceFunctionPointers.end())
......
......@@ -29,13 +29,13 @@ namespace vk
template<typename T, typename VkT>
static inline T* VkTtoT(VkT vkObject)
{
return static_cast<T*>(vkObject.get());
return static_cast<T*>(static_cast<void*>(vkObject));
}
// Converts an implementation-object pointer (T*) to a Vulkan handle (VkT).
// The handle type is POD with a single uint64_t member, so it is built via
// aggregate initialization from the widened pointer value (the uint64_t
// widening matters on 32-bit targets, where uintptr_t is 32 bits).
template<typename T, typename VkT>
static inline VkT TtoVkT(T* object)
{
	return { static_cast<uint64_t>(reinterpret_cast<uintptr_t>(object)) };
}
// For use in the placement new to make it verbose that we're allocating an object using device memory
......
......@@ -89,7 +89,7 @@ RenderPass::RenderPass(const VkRenderPassCreateInfo* pCreateInfo, void* mem) :
subpass.pColorAttachments, colorAttachmentsSize);
hostMemory += colorAttachmentsSize;
if(subpass.pResolveAttachments != nullptr)
if(subpass.pResolveAttachments)
{
subpasses[i].pResolveAttachments = reinterpret_cast<VkAttachmentReference*>(hostMemory);
memcpy(const_cast<VkAttachmentReference*>(subpasses[i].pResolveAttachments),
......@@ -107,7 +107,7 @@ RenderPass::RenderPass(const VkRenderPassCreateInfo* pCreateInfo, void* mem) :
}
}
if(subpass.pDepthStencilAttachment != nullptr)
if(subpass.pDepthStencilAttachment)
{
subpasses[i].pDepthStencilAttachment = reinterpret_cast<VkAttachmentReference*>(hostMemory);
memcpy(const_cast<VkAttachmentReference*>(subpasses[i].pDepthStencilAttachment),
......@@ -156,11 +156,11 @@ size_t RenderPass::ComputeRequiredAllocationSize(const VkRenderPassCreateInfo* p
{
const auto& subpass = pCreateInfo->pSubpasses[i];
uint32_t nbAttachments = subpass.inputAttachmentCount + subpass.colorAttachmentCount;
if(subpass.pResolveAttachments != nullptr)
if(subpass.pResolveAttachments)
{
nbAttachments += subpass.colorAttachmentCount;
}
if(subpass.pDepthStencilAttachment != nullptr)
if(subpass.pDepthStencilAttachment)
{
nbAttachments += 1;
}
......
......@@ -17,26 +17,29 @@
#include <cstddef>
#include <cstdint>
#include <type_traits>
// POD wrapper for a Vulkan non-dispatchable handle. It stores the handle
// as a uint64_t (the upstream header's handle type on 32-bit targets) and
// must remain trivial and standard-layout so it is passed by value exactly
// like a uint64_t / pointer. The member is public (and there is no
// constructor) so the type stays an aggregate, allowing brace
// initialization from a uint64_t (see TtoVkT).
template<typename T> class VkNonDispatchableHandle
{
public:
	operator void*() const
	{
		// VkNonDispatchableHandle must be POD to ensure it gets passed by value
		// the same way as a uint64_t, which is the upstream header's handle type
		// when compiled for 32b architectures. On 64b architectures, the
		// upstream header's handle type is a pointer type.
		static_assert(std::is_trivial<VkNonDispatchableHandle<T>>::value, "VkNonDispatchableHandle<T> is not trivial!");
		static_assert(std::is_standard_layout<VkNonDispatchableHandle<T>>::value, "VkNonDispatchableHandle<T> is not standard layout!");
		return reinterpret_cast<void*>(static_cast<uintptr_t>(handle));
	}

	void operator=(uint64_t h)
	{
		handle = h;
	}

	uint64_t handle;
};
......
......@@ -103,7 +103,7 @@ void PresentImage::clear()
// Returns the Vulkan handle for this present image, or a null handle when
// no image is attached. With POD handles, the null case must be built
// explicitly via aggregate initialization from VK_NULL_HANDLE.
VkImage PresentImage::asVkImage() const
{
	return image ? static_cast<VkImage>(*image) : VkImage({ VK_NULL_HANDLE });
}
void SurfaceKHR::getSurfaceCapabilities(VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) const
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment