Commit bd4cf81f authored by Alexis Hétu, committed by Alexis Hétu

Reduce the complexity of VkNonDispatchableHandle

- Simplified VkNonDispatchableHandle as much as possible so that it's just a wrapper over a uint64_t
- Centralized casting logic in vkObject.hpp
- Explicitly changed "Cast" to "vk::Cast" to differentiate from vk::&lt;object&gt;::Cast
- Moved VkDescriptorSet operators from VulkanPlatform.h to VkDescriptorPool.cpp

Bug: b/129979580
Change-Id: I4fe5fcfe56029c21594088d274bae862999597ad
Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/32928
Tested-by: Alexis Hétu <sugoi@google.com>
Reviewed-by: Chris Forbes <chrisforbes@google.com>
Kokoro-Presubmit: kokoro <noreply+kokoro@google.com>
parent dc842d5e
...@@ -53,7 +53,7 @@ private: ...@@ -53,7 +53,7 @@ private:
static inline Buffer* Cast(VkBuffer object) static inline Buffer* Cast(VkBuffer object)
{ {
return reinterpret_cast<Buffer*>(object.get()); return Buffer::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -20,7 +20,7 @@ namespace vk ...@@ -20,7 +20,7 @@ namespace vk
{ {
BufferView::BufferView(const VkBufferViewCreateInfo* pCreateInfo, void* mem) : BufferView::BufferView(const VkBufferViewCreateInfo* pCreateInfo, void* mem) :
buffer(Cast(pCreateInfo->buffer)), format(pCreateInfo->format), offset(pCreateInfo->offset) buffer(vk::Cast(pCreateInfo->buffer)), format(pCreateInfo->format), offset(pCreateInfo->offset)
{ {
if (pCreateInfo->range == VK_WHOLE_SIZE) if (pCreateInfo->range == VK_WHOLE_SIZE)
{ {
......
...@@ -49,7 +49,7 @@ private: ...@@ -49,7 +49,7 @@ private:
static inline BufferView* Cast(VkBufferView object) static inline BufferView* Cast(VkBufferView object)
{ {
return reinterpret_cast<BufferView*>(object.get()); return BufferView::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -1384,7 +1384,7 @@ void CommandBuffer::bindDescriptorSets(VkPipelineBindPoint pipelineBindPoint, co ...@@ -1384,7 +1384,7 @@ void CommandBuffer::bindDescriptorSets(VkPipelineBindPoint pipelineBindPoint, co
ASSERT(dynamicOffsetCount >= numDynamicDescriptors); ASSERT(dynamicOffsetCount >= numDynamicDescriptors);
addCommand<BindDescriptorSet>( addCommand<BindDescriptorSet>(
pipelineBindPoint, layout, descriptorSetIndex, Cast(pDescriptorSets[i]), pipelineBindPoint, layout, descriptorSetIndex, vk::Cast(pDescriptorSets[i]),
dynamicOffsetCount, pDynamicOffsets); dynamicOffsetCount, pDynamicOffsets);
pDynamicOffsets += numDynamicDescriptors; pDynamicOffsets += numDynamicDescriptors;
......
...@@ -49,6 +49,11 @@ public: ...@@ -49,6 +49,11 @@ public:
CommandBuffer(VkCommandBufferLevel pLevel); CommandBuffer(VkCommandBufferLevel pLevel);
static inline CommandBuffer* Cast(VkCommandBuffer object)
{
return reinterpret_cast<CommandBuffer*>(object);
}
void destroy(const VkAllocationCallbacks* pAllocator); void destroy(const VkAllocationCallbacks* pAllocator);
VkResult begin(VkCommandBufferUsageFlags flags, const VkCommandBufferInheritanceInfo* pInheritanceInfo); VkResult begin(VkCommandBufferUsageFlags flags, const VkCommandBufferInheritanceInfo* pInheritanceInfo);
......
...@@ -95,7 +95,7 @@ VkResult CommandPool::reset(VkCommandPoolResetFlags flags) ...@@ -95,7 +95,7 @@ VkResult CommandPool::reset(VkCommandPoolResetFlags flags)
// the command pool are put in the initial state." // the command pool are put in the initial state."
for(auto commandBuffer : *commandBuffers) for(auto commandBuffer : *commandBuffers)
{ {
Cast(commandBuffer)->reset(flags); vk::Cast(commandBuffer)->reset(flags);
} }
// According the Vulkan 1.1 spec: // According the Vulkan 1.1 spec:
......
...@@ -40,7 +40,7 @@ private: ...@@ -40,7 +40,7 @@ private:
static inline CommandPool* Cast(VkCommandPool object) static inline CommandPool* Cast(VkCommandPool object)
{ {
return reinterpret_cast<CommandPool*>(object.get()); return CommandPool::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -20,18 +20,43 @@ ...@@ -20,18 +20,43 @@
#include <algorithm> #include <algorithm>
#include <memory> #include <memory>
namespace
{
inline uintptr_t toPtr(const VkDescriptorSet& descSet)
{
return reinterpret_cast<uintptr_t>(vk::Cast(descSet));
}
inline uint64_t operator+(const VkDescriptorSet& lhs, size_t offset)
{
return static_cast<uint64_t>(toPtr(lhs) + offset);
}
inline void operator+=(VkDescriptorSet& lhs, size_t offset)
{
lhs = static_cast<uint64_t>(toPtr(lhs) + offset);
}
inline uintptr_t operator-(const VkDescriptorSet& lhs, const VkDescriptorSet& rhs)
{
return toPtr(lhs) - toPtr(rhs);
}
}
namespace vk namespace vk
{ {
DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem) : DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem) :
pool(static_cast<uint8_t*>(mem)), pool(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(mem))),
poolSize(ComputeRequiredAllocationSize(pCreateInfo)) poolSize(ComputeRequiredAllocationSize(pCreateInfo))
{ {
} }
void DescriptorPool::destroy(const VkAllocationCallbacks* pAllocator) void DescriptorPool::destroy(const VkAllocationCallbacks* pAllocator)
{ {
vk::deallocate(pool, pAllocator); vk::deallocate(pool.get(), pAllocator);
} }
size_t DescriptorPool::ComputeRequiredAllocationSize(const VkDescriptorPoolCreateInfo* pCreateInfo) size_t DescriptorPool::ComputeRequiredAllocationSize(const VkDescriptorPoolCreateInfo* pCreateInfo)
...@@ -54,7 +79,7 @@ VkResult DescriptorPool::allocateSets(uint32_t descriptorSetCount, const VkDescr ...@@ -54,7 +79,7 @@ VkResult DescriptorPool::allocateSets(uint32_t descriptorSetCount, const VkDescr
for(uint32_t i = 0; i < descriptorSetCount; i++) for(uint32_t i = 0; i < descriptorSetCount; i++)
{ {
pDescriptorSets[i] = VK_NULL_HANDLE; pDescriptorSets[i] = VK_NULL_HANDLE;
layoutSizes[i] = Cast(pSetLayouts[i])->getDescriptorSetAllocationSize(); layoutSizes[i] = vk::Cast(pSetLayouts[i])->getDescriptorSetAllocationSize();
} }
VkResult result = allocateSets(&(layoutSizes[0]), descriptorSetCount, pDescriptorSets); VkResult result = allocateSets(&(layoutSizes[0]), descriptorSetCount, pDescriptorSets);
...@@ -62,7 +87,7 @@ VkResult DescriptorPool::allocateSets(uint32_t descriptorSetCount, const VkDescr ...@@ -62,7 +87,7 @@ VkResult DescriptorPool::allocateSets(uint32_t descriptorSetCount, const VkDescr
{ {
for(uint32_t i = 0; i < descriptorSetCount; i++) for(uint32_t i = 0; i < descriptorSetCount; i++)
{ {
Cast(pSetLayouts[i])->initialize(vk::Cast(pDescriptorSets[i])); vk::Cast(pSetLayouts[i])->initialize(vk::Cast(pDescriptorSets[i]));
} }
} }
return result; return result;
......
...@@ -55,7 +55,7 @@ namespace vk ...@@ -55,7 +55,7 @@ namespace vk
static inline DescriptorPool* Cast(VkDescriptorPool object) static inline DescriptorPool* Cast(VkDescriptorPool object)
{ {
return reinterpret_cast<DescriptorPool*>(object.get()); return DescriptorPool::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -32,6 +32,11 @@ namespace vk ...@@ -32,6 +32,11 @@ namespace vk
class DescriptorSet class DescriptorSet
{ {
public: public:
static inline DescriptorSet* Cast(VkDescriptorSet object)
{
return static_cast<DescriptorSet*>(object.get());
}
using Bindings = std::array<vk::DescriptorSet*, vk::MAX_BOUND_DESCRIPTOR_SETS>; using Bindings = std::array<vk::DescriptorSet*, vk::MAX_BOUND_DESCRIPTOR_SETS>;
using DynamicOffsets = std::array<uint32_t, vk::MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC>; using DynamicOffsets = std::array<uint32_t, vk::MAX_DESCRIPTOR_SET_COMBINED_BUFFERS_DYNAMIC>;
...@@ -41,7 +46,7 @@ namespace vk ...@@ -41,7 +46,7 @@ namespace vk
inline DescriptorSet* Cast(VkDescriptorSet object) inline DescriptorSet* Cast(VkDescriptorSet object)
{ {
return reinterpret_cast<DescriptorSet*>(object.get()); return DescriptorSet::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -303,7 +303,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor ...@@ -303,7 +303,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor
for (uint32_t i = 0; i < entry.descriptorCount; i++) for (uint32_t i = 0; i < entry.descriptorCount; i++)
{ {
auto update = reinterpret_cast<VkBufferView const *>(src + entry.offset + entry.stride * i); auto update = reinterpret_cast<VkBufferView const *>(src + entry.offset + entry.stride * i);
auto bufferView = Cast(*update); auto bufferView = vk::Cast(*update);
imageSampler[i].type = VK_IMAGE_VIEW_TYPE_1D; imageSampler[i].type = VK_IMAGE_VIEW_TYPE_1D;
imageSampler[i].imageViewId = bufferView->id; imageSampler[i].imageViewId = bufferView->id;
...@@ -438,7 +438,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor ...@@ -438,7 +438,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor
for(uint32_t i = 0; i < entry.descriptorCount; i++) for(uint32_t i = 0; i < entry.descriptorCount; i++)
{ {
auto update = reinterpret_cast<VkDescriptorImageInfo const *>(src + entry.offset + entry.stride * i); auto update = reinterpret_cast<VkDescriptorImageInfo const *>(src + entry.offset + entry.stride * i);
auto imageView = Cast(update->imageView); auto imageView = vk::Cast(update->imageView);
descriptor[i].ptr = imageView->getOffsetPointer({0, 0, 0}, VK_IMAGE_ASPECT_COLOR_BIT, 0, 0); descriptor[i].ptr = imageView->getOffsetPointer({0, 0, 0}, VK_IMAGE_ASPECT_COLOR_BIT, 0, 0);
descriptor[i].extent = imageView->getMipLevelExtent(0); descriptor[i].extent = imageView->getMipLevelExtent(0);
descriptor[i].rowPitchBytes = imageView->rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0); descriptor[i].rowPitchBytes = imageView->rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0);
...@@ -467,7 +467,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor ...@@ -467,7 +467,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor
for (uint32_t i = 0; i < entry.descriptorCount; i++) for (uint32_t i = 0; i < entry.descriptorCount; i++)
{ {
auto update = reinterpret_cast<VkBufferView const *>(src + entry.offset + entry.stride * i); auto update = reinterpret_cast<VkBufferView const *>(src + entry.offset + entry.stride * i);
auto bufferView = Cast(*update); auto bufferView = vk::Cast(*update);
descriptor[i].ptr = bufferView->getPointer(); descriptor[i].ptr = bufferView->getPointer();
descriptor[i].extent = {bufferView->getElementCount(), 1, 1}; descriptor[i].extent = {bufferView->getElementCount(), 1, 1};
descriptor[i].rowPitchBytes = 0; descriptor[i].rowPitchBytes = 0;
...@@ -487,7 +487,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor ...@@ -487,7 +487,7 @@ void DescriptorSetLayout::WriteDescriptorSet(DescriptorSet *dstSet, VkDescriptor
for (uint32_t i = 0; i < entry.descriptorCount; i++) for (uint32_t i = 0; i < entry.descriptorCount; i++)
{ {
auto update = reinterpret_cast<VkDescriptorBufferInfo const *>(src + entry.offset + entry.stride * i); auto update = reinterpret_cast<VkDescriptorBufferInfo const *>(src + entry.offset + entry.stride * i);
auto buffer = Cast(update->buffer); auto buffer = vk::Cast(update->buffer);
descriptor[i].ptr = buffer->getOffsetPointer(update->offset); descriptor[i].ptr = buffer->getOffsetPointer(update->offset);
descriptor[i].sizeInBytes = static_cast<int>((update->range == VK_WHOLE_SIZE) ? buffer->getSize() - update->offset : update->range); descriptor[i].sizeInBytes = static_cast<int>((update->range == VK_WHOLE_SIZE) ? buffer->getSize() - update->offset : update->range);
descriptor[i].robustnessSize = static_cast<int>(buffer->getSize() - update->offset); descriptor[i].robustnessSize = static_cast<int>(buffer->getSize() - update->offset);
......
...@@ -141,7 +141,7 @@ private: ...@@ -141,7 +141,7 @@ private:
static inline DescriptorSetLayout* Cast(VkDescriptorSetLayout object) static inline DescriptorSetLayout* Cast(VkDescriptorSetLayout object)
{ {
return reinterpret_cast<DescriptorSetLayout*>(object.get()); return DescriptorSetLayout::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -22,7 +22,7 @@ namespace vk ...@@ -22,7 +22,7 @@ namespace vk
DescriptorUpdateTemplate::DescriptorUpdateTemplate(const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, void* mem) : DescriptorUpdateTemplate::DescriptorUpdateTemplate(const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, void* mem) :
descriptorUpdateEntryCount(pCreateInfo->descriptorUpdateEntryCount), descriptorUpdateEntryCount(pCreateInfo->descriptorUpdateEntryCount),
descriptorUpdateEntries(reinterpret_cast<VkDescriptorUpdateTemplateEntry*>(mem)), descriptorUpdateEntries(reinterpret_cast<VkDescriptorUpdateTemplateEntry*>(mem)),
descriptorSetLayout(Cast(pCreateInfo->descriptorSetLayout)) descriptorSetLayout(vk::Cast(pCreateInfo->descriptorSetLayout))
{ {
for(uint32_t i = 0; i < descriptorUpdateEntryCount; i++) for(uint32_t i = 0; i < descriptorUpdateEntryCount; i++)
{ {
......
...@@ -38,7 +38,7 @@ namespace vk ...@@ -38,7 +38,7 @@ namespace vk
static inline DescriptorUpdateTemplate* Cast(VkDescriptorUpdateTemplate object) static inline DescriptorUpdateTemplate* Cast(VkDescriptorUpdateTemplate object)
{ {
return reinterpret_cast<DescriptorUpdateTemplate*>(object.get()); return DescriptorUpdateTemplate::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -42,7 +42,7 @@ private: ...@@ -42,7 +42,7 @@ private:
static inline DeviceMemory* Cast(VkDeviceMemory object) static inline DeviceMemory* Cast(VkDeviceMemory object)
{ {
return reinterpret_cast<DeviceMemory*>(object.get()); return DeviceMemory::Cast(object);
} }
......
...@@ -70,7 +70,7 @@ private: ...@@ -70,7 +70,7 @@ private:
static inline Event* Cast(VkEvent object) static inline Event* Cast(VkEvent object)
{ {
return reinterpret_cast<Event*>(object.get()); return Event::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -80,7 +80,7 @@ private: ...@@ -80,7 +80,7 @@ private:
static inline Fence* Cast(VkFence object) static inline Fence* Cast(VkFence object)
{ {
return reinterpret_cast<Fence*>(object.get()); return Fence::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -27,7 +27,7 @@ Framebuffer::Framebuffer(const VkFramebufferCreateInfo* pCreateInfo, void* mem) ...@@ -27,7 +27,7 @@ Framebuffer::Framebuffer(const VkFramebufferCreateInfo* pCreateInfo, void* mem)
{ {
for(uint32_t i = 0; i < attachmentCount; i++) for(uint32_t i = 0; i < attachmentCount; i++)
{ {
attachments[i] = Cast(pCreateInfo->pAttachments[i]); attachments[i] = vk::Cast(pCreateInfo->pAttachments[i]);
} }
} }
......
...@@ -43,7 +43,7 @@ private: ...@@ -43,7 +43,7 @@ private:
static inline Framebuffer* Cast(VkFramebuffer object) static inline Framebuffer* Cast(VkFramebuffer object)
{ {
return reinterpret_cast<Framebuffer*>(object.get()); return Framebuffer::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -102,7 +102,7 @@ private: ...@@ -102,7 +102,7 @@ private:
static inline Image* Cast(VkImage object) static inline Image* Cast(VkImage object)
{ {
return reinterpret_cast<Image*>(object.get()); return Image::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -56,7 +56,7 @@ namespace vk ...@@ -56,7 +56,7 @@ namespace vk
std::atomic<uint32_t> ImageView::nextID(1); std::atomic<uint32_t> ImageView::nextID(1);
ImageView::ImageView(const VkImageViewCreateInfo* pCreateInfo, void* mem, const vk::SamplerYcbcrConversion *ycbcrConversion) : ImageView::ImageView(const VkImageViewCreateInfo* pCreateInfo, void* mem, const vk::SamplerYcbcrConversion *ycbcrConversion) :
image(Cast(pCreateInfo->image)), viewType(pCreateInfo->viewType), format(pCreateInfo->format), image(vk::Cast(pCreateInfo->image)), viewType(pCreateInfo->viewType), format(pCreateInfo->format),
components(ResolveComponentMapping(pCreateInfo->components, format)), components(ResolveComponentMapping(pCreateInfo->components, format)),
subresourceRange(ResolveRemainingLevelsLayers(pCreateInfo->subresourceRange, image)), subresourceRange(ResolveRemainingLevelsLayers(pCreateInfo->subresourceRange, image)),
ycbcrConversion(ycbcrConversion) ycbcrConversion(ycbcrConversion)
......
...@@ -104,7 +104,7 @@ inline VkComponentMapping ResolveIdentityMapping(VkComponentMapping m) ...@@ -104,7 +104,7 @@ inline VkComponentMapping ResolveIdentityMapping(VkComponentMapping m)
static inline ImageView* Cast(VkImageView object) static inline ImageView* Cast(VkImageView object)
{ {
return reinterpret_cast<ImageView*>(object.get()); return ImageView::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -25,6 +25,19 @@ ...@@ -25,6 +25,19 @@
namespace vk namespace vk
{ {
template<typename T, typename VkT>
static inline T* VkTtoT(VkT vkObject)
{
return static_cast<T*>(vkObject.get());
}
template<typename T, typename VkT>
static inline VkT TtoVkT(T* object)
{
return VkT(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(object)));
}
// For use in the placement new to make it verbose that we're allocating an object using device memory // For use in the placement new to make it verbose that we're allocating an object using device memory
static constexpr VkAllocationCallbacks* DEVICE_MEMORY = nullptr; static constexpr VkAllocationCallbacks* DEVICE_MEMORY = nullptr;
...@@ -92,7 +105,12 @@ public: ...@@ -92,7 +105,12 @@ public:
{ {
// The static_cast<T*> is used to make sure the returned pointer points to the // The static_cast<T*> is used to make sure the returned pointer points to the
// beginning of the object, even if the derived class uses multiple inheritance // beginning of the object, even if the derived class uses multiple inheritance
return reinterpret_cast<typename VkT::HandleType>(static_cast<T*>(this)); return vk::TtoVkT<T, VkT>(static_cast<T*>(this));
}
static inline T* Cast(VkT vkObject)
{
return vk::VkTtoT<T, VkT>(vkObject);
} }
}; };
......
...@@ -228,7 +228,7 @@ namespace vk ...@@ -228,7 +228,7 @@ namespace vk
Pipeline::Pipeline(PipelineLayout const *layout) : layout(layout) {} Pipeline::Pipeline(PipelineLayout const *layout) : layout(layout) {}
GraphicsPipeline::GraphicsPipeline(const VkGraphicsPipelineCreateInfo* pCreateInfo, void* mem) GraphicsPipeline::GraphicsPipeline(const VkGraphicsPipelineCreateInfo* pCreateInfo, void* mem)
: Pipeline(Cast(pCreateInfo->layout)) : Pipeline(vk::Cast(pCreateInfo->layout))
{ {
if(((pCreateInfo->flags & if(((pCreateInfo->flags &
~(VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT | ~(VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT |
...@@ -451,12 +451,12 @@ void GraphicsPipeline::compileShaders(const VkAllocationCallbacks* pAllocator, c ...@@ -451,12 +451,12 @@ void GraphicsPipeline::compileShaders(const VkAllocationCallbacks* pAllocator, c
UNIMPLEMENTED("pStage->flags"); UNIMPLEMENTED("pStage->flags");
} }
auto module = Cast(pStage->module); auto module = vk::Cast(pStage->module);
auto code = preprocessSpirv(module->getCode(), pStage->pSpecializationInfo); auto code = preprocessSpirv(module->getCode(), pStage->pSpecializationInfo);
// FIXME (b/119409619): use an allocator here so we can control all memory allocations // FIXME (b/119409619): use an allocator here so we can control all memory allocations
// TODO: also pass in any pipeline state which will affect shader compilation // TODO: also pass in any pipeline state which will affect shader compilation
auto spirvShader = new sw::SpirvShader{pStage, code, Cast(pCreateInfo->renderPass), pCreateInfo->subpass}; auto spirvShader = new sw::SpirvShader{pStage, code, vk::Cast(pCreateInfo->renderPass), pCreateInfo->subpass};
switch (pStage->stage) switch (pStage->stage)
{ {
...@@ -525,7 +525,7 @@ bool GraphicsPipeline::hasDynamicState(VkDynamicState dynamicState) const ...@@ -525,7 +525,7 @@ bool GraphicsPipeline::hasDynamicState(VkDynamicState dynamicState) const
} }
ComputePipeline::ComputePipeline(const VkComputePipelineCreateInfo* pCreateInfo, void* mem) ComputePipeline::ComputePipeline(const VkComputePipelineCreateInfo* pCreateInfo, void* mem)
: Pipeline(Cast(pCreateInfo->layout)) : Pipeline(vk::Cast(pCreateInfo->layout))
{ {
} }
...@@ -542,7 +542,7 @@ size_t ComputePipeline::ComputeRequiredAllocationSize(const VkComputePipelineCre ...@@ -542,7 +542,7 @@ size_t ComputePipeline::ComputeRequiredAllocationSize(const VkComputePipelineCre
void ComputePipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkComputePipelineCreateInfo* pCreateInfo) void ComputePipeline::compileShaders(const VkAllocationCallbacks* pAllocator, const VkComputePipelineCreateInfo* pCreateInfo)
{ {
auto module = Cast(pCreateInfo->stage.module); auto module = vk::Cast(pCreateInfo->stage.module);
auto code = preprocessSpirv(module->getCode(), pCreateInfo->stage.pSpecializationInfo); auto code = preprocessSpirv(module->getCode(), pCreateInfo->stage.pSpecializationInfo);
......
...@@ -38,7 +38,12 @@ public: ...@@ -38,7 +38,12 @@ public:
operator VkPipeline() operator VkPipeline()
{ {
return reinterpret_cast<VkPipeline::HandleType>(this); return vk::TtoVkT<Pipeline, VkPipeline>(this);
}
static inline Pipeline* Cast(VkPipeline object)
{
return vk::VkTtoT<Pipeline, VkPipeline>(object);
} }
void destroy(const VkAllocationCallbacks* pAllocator) void destroy(const VkAllocationCallbacks* pAllocator)
...@@ -124,7 +129,7 @@ protected: ...@@ -124,7 +129,7 @@ protected:
static inline Pipeline* Cast(VkPipeline object) static inline Pipeline* Cast(VkPipeline object)
{ {
return reinterpret_cast<Pipeline*>(object.get()); return Pipeline::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -47,7 +47,7 @@ private: ...@@ -47,7 +47,7 @@ private:
static inline PipelineCache* Cast(VkPipelineCache object) static inline PipelineCache* Cast(VkPipelineCache object)
{ {
return reinterpret_cast<PipelineCache*>(object.get()); return PipelineCache::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -27,7 +27,7 @@ PipelineLayout::PipelineLayout(const VkPipelineLayoutCreateInfo* pCreateInfo, vo ...@@ -27,7 +27,7 @@ PipelineLayout::PipelineLayout(const VkPipelineLayoutCreateInfo* pCreateInfo, vo
setLayouts = reinterpret_cast<DescriptorSetLayout**>(hostMem); setLayouts = reinterpret_cast<DescriptorSetLayout**>(hostMem);
for(uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++) for(uint32_t i = 0; i < pCreateInfo->setLayoutCount; i++)
{ {
setLayouts[i] = Cast(pCreateInfo->pSetLayouts[i]); setLayouts[i] = vk::Cast(pCreateInfo->pSetLayouts[i]);
} }
hostMem += setLayoutsSize; hostMem += setLayoutsSize;
......
...@@ -45,7 +45,7 @@ private: ...@@ -45,7 +45,7 @@ private:
static inline PipelineLayout* Cast(VkPipelineLayout object) static inline PipelineLayout* Cast(VkPipelineLayout object)
{ {
return reinterpret_cast<PipelineLayout*>(object.get()); return PipelineLayout::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -114,7 +114,7 @@ private: ...@@ -114,7 +114,7 @@ private:
static inline QueryPool* Cast(VkQueryPool object) static inline QueryPool* Cast(VkQueryPool object)
{ {
return reinterpret_cast<QueryPool*>(object.get()); return QueryPool::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -89,7 +89,7 @@ private: ...@@ -89,7 +89,7 @@ private:
static inline RenderPass* Cast(VkRenderPass object) static inline RenderPass* Cast(VkRenderPass object)
{ {
return reinterpret_cast<RenderPass*>(object.get()); return RenderPass::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -116,12 +116,12 @@ public: ...@@ -116,12 +116,12 @@ public:
static inline Sampler* Cast(VkSampler object) static inline Sampler* Cast(VkSampler object)
{ {
return reinterpret_cast<Sampler*>(object.get()); return Sampler::Cast(object);
} }
static inline SamplerYcbcrConversion* Cast(VkSamplerYcbcrConversion object) static inline SamplerYcbcrConversion* Cast(VkSamplerYcbcrConversion object)
{ {
return reinterpret_cast<SamplerYcbcrConversion*>(object.get()); return SamplerYcbcrConversion::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -52,7 +52,7 @@ private: ...@@ -52,7 +52,7 @@ private:
static inline Semaphore* Cast(VkSemaphore object) static inline Semaphore* Cast(VkSemaphore object)
{ {
return reinterpret_cast<Semaphore*>(object.get()); return Semaphore::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -44,7 +44,7 @@ private: ...@@ -44,7 +44,7 @@ private:
static inline ShaderModule* Cast(VkShaderModule object) static inline ShaderModule* Cast(VkShaderModule object)
{ {
return reinterpret_cast<ShaderModule*>(object.get()); return ShaderModule::Cast(object);
} }
} // namespace vk } // namespace vk
......
...@@ -18,90 +18,26 @@ ...@@ -18,90 +18,26 @@
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
template<typename HandleType> class VkHandle template<typename T> class VkNonDispatchableHandle
{ {
public: public:
VkHandle(HandleType handle) VkNonDispatchableHandle(uint64_t h) : handle(h)
{ {
u.dummy = 0; static_assert(sizeof(VkNonDispatchableHandle) == sizeof(uint64_t), "Size is not 64 bits!");
u.handle = handle;
}
HandleType get() const
{
return u.handle;
} }
operator HandleType() const void* get() const
{ {
return u.handle; return reinterpret_cast<void*>(static_cast<uintptr_t>(handle));
} }
protected: operator void*() const
HandleType set(HandleType handle)
{ {
return (u.handle = handle); return get();
} }
private: private:
union PointerHandleUnion uint64_t handle;
{
HandleType handle;
uint64_t dummy; // VkNonDispatchableHandle's size must always be 64 bits even when void* is 32 bits
};
PointerHandleUnion u;
};
template<typename T> class VkNonDispatchableHandleBase : public VkHandle<T>
{
public:
using HandleType = T;
VkNonDispatchableHandleBase(HandleType handle) : VkHandle<T>(handle)
{
}
void operator=(HandleType handle)
{
this->set(handle);
}
};
// VkDescriptorSet objects are really just memory in the VkDescriptorPool
// object, so define different/more convenient operators for this object.
struct VkDescriptorSet_T;
template<> class VkNonDispatchableHandleBase<VkDescriptorSet_T*> : public VkHandle<uint8_t*>
{
public:
using HandleType = uint8_t*;
VkNonDispatchableHandleBase(HandleType handle) : VkHandle<uint8_t*>(handle)
{
}
HandleType operator+(ptrdiff_t rhs) const
{
return get() + rhs;
}
HandleType operator+=(ptrdiff_t rhs)
{
return this->set(get() + rhs);
}
ptrdiff_t operator-(const HandleType rhs) const
{
return get() - rhs;
}
};
template<typename T> class VkNonDispatchableHandle : public VkNonDispatchableHandleBase<T>
{
public:
VkNonDispatchableHandle(typename VkNonDispatchableHandleBase<T>::HandleType handle) : VkNonDispatchableHandleBase<T>(handle)
{
static_assert(sizeof(VkNonDispatchableHandle) == sizeof(uint64_t), "Size is not 64 bits!");
}
}; };
#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \ #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
......
...@@ -61,7 +61,12 @@ public: ...@@ -61,7 +61,12 @@ public:
operator VkSurfaceKHR() operator VkSurfaceKHR()
{ {
return reinterpret_cast<VkSurfaceKHR::HandleType>(this); return vk::TtoVkT<SurfaceKHR, VkSurfaceKHR>(this);
}
static inline SurfaceKHR* Cast(VkSurfaceKHR object)
{
return vk::VkTtoT<SurfaceKHR, VkSurfaceKHR>(object);
} }
void destroy(const VkAllocationCallbacks* pAllocator) void destroy(const VkAllocationCallbacks* pAllocator)
...@@ -93,7 +98,7 @@ private: ...@@ -93,7 +98,7 @@ private:
static inline SurfaceKHR* Cast(VkSurfaceKHR object) static inline SurfaceKHR* Cast(VkSurfaceKHR object)
{ {
return reinterpret_cast<SurfaceKHR*>(object.get()); return SurfaceKHR::Cast(object);
} }
} }
......
...@@ -26,7 +26,7 @@ namespace vk ...@@ -26,7 +26,7 @@ namespace vk
{ {
SwapchainKHR::SwapchainKHR(const VkSwapchainCreateInfoKHR *pCreateInfo, void *mem) : SwapchainKHR::SwapchainKHR(const VkSwapchainCreateInfoKHR *pCreateInfo, void *mem) :
surface(Cast(pCreateInfo->surface)), surface(vk::Cast(pCreateInfo->surface)),
images(reinterpret_cast<PresentImage*>(mem)), images(reinterpret_cast<PresentImage*>(mem)),
imageCount(pCreateInfo->minImageCount), imageCount(pCreateInfo->minImageCount),
retired(false) retired(false)
......
...@@ -59,7 +59,7 @@ private: ...@@ -59,7 +59,7 @@ private:
static inline SwapchainKHR* Cast(VkSwapchainKHR object) static inline SwapchainKHR* Cast(VkSwapchainKHR object)
{ {
return reinterpret_cast<SwapchainKHR*>(object.get()); return SwapchainKHR::Cast(object);
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment