Commit 67cf8a92 by Alexis Hetu Committed by Alexis Hétu

32 bit safe code

In order to easily add 32 bit support, this CL adds a header which essentially uses a union to make sure that even when we have 32 bit pointers, the Vulkan handles are 64 bit. This change should be a noop. Bug b/129979580 b/127920555 Change-Id: I54254929186584ec4544a1da5a7def7cf56e392e Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/31070 Presubmit-Ready: Alexis Hétu <sugoi@google.com> Kokoro-Presubmit: kokoro <noreply+kokoro@google.com> Reviewed-by: Nicolas Capens <nicolascapens@google.com> Tested-by: Alexis Hétu <sugoi@google.com>
parent 3e35131f
...@@ -23,6 +23,10 @@ ...@@ -23,6 +23,10 @@
#include <string> #include <string>
#ifdef Status
#undef Status // b/127920555
#endif
namespace sw namespace sw
{ {
class SwiftConfig class SwiftConfig
......
...@@ -22,10 +22,6 @@ ...@@ -22,10 +22,6 @@
#include "Vulkan/VkDebug.hpp" #include "Vulkan/VkDebug.hpp"
#include "Vulkan/VkPipelineLayout.hpp" #include "Vulkan/VkPipelineLayout.hpp"
#ifdef Bool
#undef Bool // b/127920555
#endif
namespace sw namespace sw
{ {
extern bool postBlendSRGB; extern bool postBlendSRGB;
......
...@@ -29,11 +29,6 @@ ...@@ -29,11 +29,6 @@
#include <spirv/unified1/spirv.hpp> #include <spirv/unified1/spirv.hpp>
#include <spirv/unified1/GLSL.std.450.h> #include <spirv/unified1/GLSL.std.450.h>
#ifdef Bool
#undef Bool // b/127920555
#undef None
#endif
namespace namespace
{ {
constexpr float PI = 3.141592653589793f; constexpr float PI = 3.141592653589793f;
......
...@@ -30,11 +30,6 @@ ...@@ -30,11 +30,6 @@
#include <mutex> #include <mutex>
#ifdef Bool
#undef Bool // b/127920555
#undef None
#endif
namespace sw { namespace sw {
SpirvShader::ImageSampler *SpirvShader::getImageSampler(uint32_t inst, vk::SampledImageDescriptor const *imageDescriptor, const vk::Sampler *sampler) SpirvShader::ImageSampler *SpirvShader::getImageSampler(uint32_t inst, vk::SampledImageDescriptor const *imageDescriptor, const vk::Sampler *sampler)
......
...@@ -52,7 +52,7 @@ private: ...@@ -52,7 +52,7 @@ private:
static inline Buffer* Cast(VkBuffer object) static inline Buffer* Cast(VkBuffer object)
{ {
return reinterpret_cast<Buffer*>(object); return reinterpret_cast<Buffer*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -48,7 +48,7 @@ private: ...@@ -48,7 +48,7 @@ private:
static inline BufferView* Cast(VkBufferView object) static inline BufferView* Cast(VkBufferView object)
{ {
return reinterpret_cast<BufferView*>(object); return reinterpret_cast<BufferView*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -41,7 +41,7 @@ private: ...@@ -41,7 +41,7 @@ private:
static inline CommandPool* Cast(VkCommandPool object) static inline CommandPool* Cast(VkCommandPool object)
{ {
return reinterpret_cast<CommandPool*>(object); return reinterpret_cast<CommandPool*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include "Version.h" #include "Version.h"
#include <vulkan/vulkan_core.h> #include <Vulkan/VulkanPlatform.h>
namespace vk namespace vk
{ {
......
...@@ -24,7 +24,7 @@ namespace vk ...@@ -24,7 +24,7 @@ namespace vk
{ {
DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem) : DescriptorPool::DescriptorPool(const VkDescriptorPoolCreateInfo* pCreateInfo, void* mem) :
pool(reinterpret_cast<VkDescriptorSet>(mem)), pool(static_cast<uint8_t*>(mem)),
poolSize(ComputeRequiredAllocationSize(pCreateInfo)) poolSize(ComputeRequiredAllocationSize(pCreateInfo))
{ {
} }
...@@ -77,17 +77,17 @@ VkDescriptorSet DescriptorPool::findAvailableMemory(size_t size) ...@@ -77,17 +77,17 @@ VkDescriptorSet DescriptorPool::findAvailableMemory(size_t size)
// First, look for space at the end of the pool // First, look for space at the end of the pool
const auto itLast = nodes.rbegin(); const auto itLast = nodes.rbegin();
ptrdiff_t itemStart = reinterpret_cast<char*>(itLast->set) - reinterpret_cast<char*>(pool); ptrdiff_t itemStart = itLast->set - pool;
ptrdiff_t nextItemStart = itemStart + itLast->size; ptrdiff_t nextItemStart = itemStart + itLast->size;
size_t freeSpace = poolSize - nextItemStart; size_t freeSpace = poolSize - nextItemStart;
if(freeSpace >= size) if(freeSpace >= size)
{ {
return reinterpret_cast<VkDescriptorSet>(reinterpret_cast<char*>(pool) + nextItemStart); return pool + nextItemStart;
} }
// Second, look for space at the beginning of the pool // Second, look for space at the beginning of the pool
const auto itBegin = nodes.end(); const auto itBegin = nodes.end();
freeSpace = reinterpret_cast<char*>(itBegin->set) - reinterpret_cast<char*>(pool); freeSpace = itBegin->set - pool;
if(freeSpace >= size) if(freeSpace >= size)
{ {
return pool; return pool;
...@@ -99,8 +99,8 @@ VkDescriptorSet DescriptorPool::findAvailableMemory(size_t size) ...@@ -99,8 +99,8 @@ VkDescriptorSet DescriptorPool::findAvailableMemory(size_t size)
++nextIt; ++nextIt;
for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt) for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt)
{ {
VkDescriptorSet freeSpaceStart = reinterpret_cast<VkDescriptorSet>(reinterpret_cast<char*>(it->set) + it->size); VkDescriptorSet freeSpaceStart(it->set + it->size);
freeSpace = reinterpret_cast<char*>(nextIt->set) - reinterpret_cast<char*>(freeSpaceStart); freeSpace = nextIt->set - freeSpaceStart;
if(freeSpace >= size) if(freeSpace >= size)
{ {
return freeSpaceStart; return freeSpaceStart;
...@@ -132,7 +132,7 @@ VkResult DescriptorPool::allocateSets(size_t* sizes, uint32_t numAllocs, VkDescr ...@@ -132,7 +132,7 @@ VkResult DescriptorPool::allocateSets(size_t* sizes, uint32_t numAllocs, VkDescr
{ {
pDescriptorSets[i] = memory; pDescriptorSets[i] = memory;
nodes.insert(Node(pDescriptorSets[i], sizes[i])); nodes.insert(Node(pDescriptorSets[i], sizes[i]));
memory = reinterpret_cast<VkDescriptorSet>(reinterpret_cast<char*>(memory) + sizes[i]); memory += sizes[i];
} }
return VK_SUCCESS; return VK_SUCCESS;
...@@ -193,11 +193,11 @@ size_t DescriptorPool::computeTotalFreeSize() const ...@@ -193,11 +193,11 @@ size_t DescriptorPool::computeTotalFreeSize() const
// Compute space at the end of the pool // Compute space at the end of the pool
const auto itLast = nodes.rbegin(); const auto itLast = nodes.rbegin();
totalFreeSize += poolSize - ((reinterpret_cast<char*>(itLast->set) - reinterpret_cast<char*>(pool)) + itLast->size); totalFreeSize += poolSize - ((itLast->set - pool) + itLast->size);
// Compute space at the beginning of the pool // Compute space at the beginning of the pool
const auto itBegin = nodes.end(); const auto itBegin = nodes.end();
totalFreeSize += reinterpret_cast<char*>(itBegin->set) - reinterpret_cast<char*>(pool); totalFreeSize += itBegin->set - pool;
// Finally, look between existing pool items // Finally, look between existing pool items
const auto itEnd = nodes.end(); const auto itEnd = nodes.end();
...@@ -205,7 +205,7 @@ size_t DescriptorPool::computeTotalFreeSize() const ...@@ -205,7 +205,7 @@ size_t DescriptorPool::computeTotalFreeSize() const
++nextIt; ++nextIt;
for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt) for(auto it = itBegin; nextIt != itEnd; ++it, ++nextIt)
{ {
totalFreeSize += (reinterpret_cast<char*>(nextIt->set) - reinterpret_cast<char*>(it->set)) - it->size; totalFreeSize += (nextIt->set - it->set) - it->size;
} }
return totalFreeSize; return totalFreeSize;
......
...@@ -45,18 +45,18 @@ namespace vk ...@@ -45,18 +45,18 @@ namespace vk
bool operator<(const Node& node) const { return this->set < node.set; } bool operator<(const Node& node) const { return this->set < node.set; }
bool operator==(VkDescriptorSet set) const { return this->set == set; } bool operator==(VkDescriptorSet set) const { return this->set == set; }
VkDescriptorSet set; VkDescriptorSet set = VK_NULL_HANDLE;
size_t size; size_t size = 0;
}; };
std::set<Node> nodes; std::set<Node> nodes;
VkDescriptorSet pool = nullptr; VkDescriptorSet pool = VK_NULL_HANDLE;
size_t poolSize = 0; size_t poolSize = 0;
}; };
static inline DescriptorPool* Cast(VkDescriptorPool object) static inline DescriptorPool* Cast(VkDescriptorPool object)
{ {
return reinterpret_cast<DescriptorPool*>(object); return reinterpret_cast<DescriptorPool*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -41,7 +41,7 @@ namespace vk ...@@ -41,7 +41,7 @@ namespace vk
inline DescriptorSet* Cast(VkDescriptorSet object) inline DescriptorSet* Cast(VkDescriptorSet object)
{ {
return reinterpret_cast<DescriptorSet*>(object); return reinterpret_cast<DescriptorSet*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -135,7 +135,7 @@ private: ...@@ -135,7 +135,7 @@ private:
static inline DescriptorSetLayout* Cast(VkDescriptorSetLayout object) static inline DescriptorSetLayout* Cast(VkDescriptorSetLayout object)
{ {
return reinterpret_cast<DescriptorSetLayout*>(object); return reinterpret_cast<DescriptorSetLayout*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -39,7 +39,7 @@ namespace vk ...@@ -39,7 +39,7 @@ namespace vk
static inline DescriptorUpdateTemplate* Cast(VkDescriptorUpdateTemplate object) static inline DescriptorUpdateTemplate* Cast(VkDescriptorUpdateTemplate object)
{ {
return reinterpret_cast<DescriptorUpdateTemplate*>(object); return reinterpret_cast<DescriptorUpdateTemplate*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -43,7 +43,7 @@ private: ...@@ -43,7 +43,7 @@ private:
static inline DeviceMemory* Cast(VkDeviceMemory object) static inline DeviceMemory* Cast(VkDeviceMemory object)
{ {
return reinterpret_cast<DeviceMemory*>(object); return reinterpret_cast<DeviceMemory*>(object.get());
} }
......
...@@ -72,7 +72,7 @@ private: ...@@ -72,7 +72,7 @@ private:
static inline Event* Cast(VkEvent object) static inline Event* Cast(VkEvent object)
{ {
return reinterpret_cast<Event*>(object); return reinterpret_cast<Event*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -108,7 +108,7 @@ private: ...@@ -108,7 +108,7 @@ private:
static inline Fence* Cast(VkFence object) static inline Fence* Cast(VkFence object)
{ {
return reinterpret_cast<Fence*>(object); return reinterpret_cast<Fence*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#ifndef VK_FORMAT_UTILS_HPP_ #ifndef VK_FORMAT_UTILS_HPP_
#define VK_FORMAT_UTILS_HPP_ #define VK_FORMAT_UTILS_HPP_
#include <vulkan/vulkan_core.h> #include <Vulkan/VulkanPlatform.h>
namespace sw namespace sw
{ {
......
...@@ -44,7 +44,7 @@ private: ...@@ -44,7 +44,7 @@ private:
static inline Framebuffer* Cast(VkFramebuffer object) static inline Framebuffer* Cast(VkFramebuffer object)
{ {
return reinterpret_cast<Framebuffer*>(object); return reinterpret_cast<Framebuffer*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -17,8 +17,6 @@ ...@@ -17,8 +17,6 @@
#include <unordered_map> #include <unordered_map>
#include <string> #include <string>
#include <vulkan/vulkan.h>
#ifdef __ANDROID__ #ifdef __ANDROID__
#include <cerrno> #include <cerrno>
#include <hardware/hwvulkan.h> #include <hardware/hwvulkan.h>
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#ifndef VK_UTILS_HPP_ #ifndef VK_UTILS_HPP_
#define VK_UTILS_HPP_ #define VK_UTILS_HPP_
#include <vulkan/vulkan_core.h> #include <Vulkan/VulkanPlatform.h>
namespace vk namespace vk
{ {
......
...@@ -111,7 +111,7 @@ private: ...@@ -111,7 +111,7 @@ private:
static inline Image* Cast(VkImage object) static inline Image* Cast(VkImage object)
{ {
return reinterpret_cast<Image*>(object); return reinterpret_cast<Image*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -89,7 +89,7 @@ private: ...@@ -89,7 +89,7 @@ private:
static inline ImageView* Cast(VkImageView object) static inline ImageView* Cast(VkImageView object)
{ {
return reinterpret_cast<ImageView*>(object); return reinterpret_cast<ImageView*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#ifndef VK_MEMORY_HPP_ #ifndef VK_MEMORY_HPP_
#define VK_MEMORY_HPP_ #define VK_MEMORY_HPP_
#include <vulkan/vulkan_core.h> #include <Vulkan/VulkanPlatform.h>
namespace vk namespace vk
{ {
...@@ -27,7 +27,7 @@ void deallocate(void* ptr, const VkAllocationCallbacks* pAllocator); ...@@ -27,7 +27,7 @@ void deallocate(void* ptr, const VkAllocationCallbacks* pAllocator);
template <typename T> template <typename T>
T* allocate(size_t count, const VkAllocationCallbacks* pAllocator) T* allocate(size_t count, const VkAllocationCallbacks* pAllocator)
{ {
return reinterpret_cast<T*>(allocate(count, alignof(T), pAllocator, T::GetAllocationScope())); return static_cast<T*>(allocate(count, alignof(T), pAllocator, T::GetAllocationScope()));
} }
} // namespace vk } // namespace vk
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#include "VkMemory.h" #include "VkMemory.h"
#include <new> #include <new>
#include <vulkan/vulkan_core.h> #include <Vulkan/VulkanPlatform.h>
#include <vulkan/vk_icd.h> #include <vulkan/vk_icd.h>
namespace vk namespace vk
...@@ -97,7 +97,7 @@ class Object : public ObjectBase<T, VkT> ...@@ -97,7 +97,7 @@ class Object : public ObjectBase<T, VkT>
public: public:
operator VkT() operator VkT()
{ {
return reinterpret_cast<VkT>(this); return reinterpret_cast<typename VkT::HandleType>(this);
} }
}; };
......
...@@ -37,7 +37,7 @@ public: ...@@ -37,7 +37,7 @@ public:
operator VkPipeline() operator VkPipeline()
{ {
return reinterpret_cast<VkPipeline>(this); return reinterpret_cast<VkPipeline::HandleType>(this);
} }
void destroy(const VkAllocationCallbacks* pAllocator) void destroy(const VkAllocationCallbacks* pAllocator)
...@@ -125,7 +125,7 @@ protected: ...@@ -125,7 +125,7 @@ protected:
static inline Pipeline* Cast(VkPipeline object) static inline Pipeline* Cast(VkPipeline object)
{ {
return reinterpret_cast<Pipeline*>(object); return reinterpret_cast<Pipeline*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -48,7 +48,7 @@ private: ...@@ -48,7 +48,7 @@ private:
static inline PipelineCache* Cast(VkPipelineCache object) static inline PipelineCache* Cast(VkPipelineCache object)
{ {
return reinterpret_cast<PipelineCache*>(object); return reinterpret_cast<PipelineCache*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -46,7 +46,7 @@ private: ...@@ -46,7 +46,7 @@ private:
static inline PipelineLayout* Cast(VkPipelineLayout object) static inline PipelineLayout* Cast(VkPipelineLayout object)
{ {
return reinterpret_cast<PipelineLayout*>(object); return reinterpret_cast<PipelineLayout*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
// VK_KHR_storage_buffer_storage_class (no functions in this extension) // VK_KHR_storage_buffer_storage_class (no functions in this extension)
// VK_KHR_variable_pointers (no functions in this extension) // VK_KHR_variable_pointers (no functions in this extension)
#include <vulkan/vulkan_core.h> #include <Vulkan/VulkanPlatform.h>
extern "C" extern "C"
{ {
......
...@@ -67,7 +67,7 @@ private: ...@@ -67,7 +67,7 @@ private:
static inline QueryPool* Cast(VkQueryPool object) static inline QueryPool* Cast(VkQueryPool object)
{ {
return reinterpret_cast<QueryPool*>(object); return reinterpret_cast<QueryPool*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -45,22 +45,22 @@ VkSubmitInfo* DeepCopySubmitInfo(uint32_t submitCount, const VkSubmitInfo* pSubm ...@@ -45,22 +45,22 @@ VkSubmitInfo* DeepCopySubmitInfo(uint32_t submitCount, const VkSubmitInfo* pSubm
for(uint32_t i = 0; i < submitCount; i++) for(uint32_t i = 0; i < submitCount; i++)
{ {
size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore); size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
submits[i].pWaitSemaphores = new (mem) VkSemaphore[pSubmits[i].waitSemaphoreCount]; submits[i].pWaitSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
memcpy(mem, pSubmits[i].pWaitSemaphores, size); memcpy(mem, pSubmits[i].pWaitSemaphores, size);
mem += size; mem += size;
size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags); size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
submits[i].pWaitDstStageMask = new (mem) VkPipelineStageFlags[pSubmits[i].waitSemaphoreCount]; submits[i].pWaitDstStageMask = reinterpret_cast<const VkPipelineStageFlags*>(mem);
memcpy(mem, pSubmits[i].pWaitDstStageMask, size); memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
mem += size; mem += size;
size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore); size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
submits[i].pSignalSemaphores = new (mem) VkSemaphore[pSubmits[i].signalSemaphoreCount]; submits[i].pSignalSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
memcpy(mem, pSubmits[i].pSignalSemaphores, size); memcpy(mem, pSubmits[i].pSignalSemaphores, size);
mem += size; mem += size;
size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer); size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
submits[i].pCommandBuffers = new (mem) VkCommandBuffer[pSubmits[i].commandBufferCount]; submits[i].pCommandBuffers = reinterpret_cast<const VkCommandBuffer*>(mem);
memcpy(mem, pSubmits[i].pCommandBuffers, size); memcpy(mem, pSubmits[i].pCommandBuffers, size);
mem += size; mem += size;
} }
......
...@@ -90,7 +90,7 @@ private: ...@@ -90,7 +90,7 @@ private:
static inline RenderPass* Cast(VkRenderPass object) static inline RenderPass* Cast(VkRenderPass object)
{ {
return reinterpret_cast<RenderPass*>(object); return reinterpret_cast<RenderPass*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -82,7 +82,7 @@ private: ...@@ -82,7 +82,7 @@ private:
static inline Sampler* Cast(VkSampler object) static inline Sampler* Cast(VkSampler object)
{ {
return reinterpret_cast<Sampler*>(object); return reinterpret_cast<Sampler*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -54,7 +54,7 @@ private: ...@@ -54,7 +54,7 @@ private:
static inline Semaphore* Cast(VkSemaphore object) static inline Semaphore* Cast(VkSemaphore object)
{ {
return reinterpret_cast<Semaphore*>(object); return reinterpret_cast<Semaphore*>(object.get());
} }
} // namespace vk } // namespace vk
......
...@@ -45,7 +45,7 @@ private: ...@@ -45,7 +45,7 @@ private:
static inline ShaderModule* Cast(VkShaderModule object) static inline ShaderModule* Cast(VkShaderModule object)
{ {
return reinterpret_cast<ShaderModule*>(object); return reinterpret_cast<ShaderModule*>(object.get());
} }
} // namespace vk } // namespace vk
......
// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef VULKAN_PLATFORM
#define VULKAN_PLATFORM
#include <cstddef>
#include <cstdint>
// Holds a pointer-sized handle inside a fixed 64-bit slot, so that Vulkan
// handle objects keep the same size and alignment whether the target's
// pointers are 32 or 64 bits wide (b/129979580).
template<typename HandleType> class alignas(sizeof(uint64_t)) VkWrapperBase
{
public:
	// Implicit construction from the raw handle. The whole 64-bit slot is
	// zeroed first so the upper half is deterministic on 32-bit builds.
	VkWrapperBase(HandleType handle)
	{
		storage.bits = 0;
		storage.pointer = handle;
	}

	// Returns the raw handle.
	HandleType get() const
	{
		return storage.pointer;
	}

	// Implicit conversion back to the raw handle.
	operator HandleType() const
	{
		return storage.pointer;
	}

protected:
	// Replaces the stored handle and returns the new value.
	HandleType set(HandleType handle)
	{
		storage.pointer = handle;
		return storage.pointer;
	}

private:
	// The union pads the object out to 64 bits even when HandleType is a
	// 32-bit pointer.
	union Slot
	{
		HandleType pointer;
		uint64_t dummy; // VkWrapper's size must always be 64 bits even when void* is 32 bits
		uint64_t bits;
	};
	Slot storage;
};
// Generic 64-bit wrapper for a non-dispatchable Vulkan handle whose
// underlying representation is the pointer type T.
template<typename T> class alignas(sizeof(uint64_t)) VkWrapper : public VkWrapperBase<T>
{
public:
	using HandleType = T;

	// Default-constructed wrappers hold a null handle.
	VkWrapper() : VkWrapper(nullptr)
	{
	}

	// Implicit wrap of a raw handle.
	VkWrapper(HandleType handle) : VkWrapperBase<T>(handle)
	{
		// Guard the 32-bit-safety invariant at compile time.
		static_assert(sizeof(VkWrapper) == sizeof(uint64_t), "Size is not 64 bits!");
	}

	// Assignment from a raw handle.
	void operator=(HandleType handle)
	{
		this->set(handle);
	}
};
// VkDescriptorSet objects are really just memory in the VkDescriptorPool
// object, so this specialization stores a uint8_t* instead and provides
// pointer-style byte arithmetic for offset math inside the pool.
struct VkDescriptorSet_T;
template<> class alignas(sizeof(uint64_t)) VkWrapper<VkDescriptorSet_T*> : public VkWrapperBase<uint8_t*>
{
public:
	using HandleType = uint8_t*;

	// Implicit wrap of a raw byte pointer into the pool's memory.
	VkWrapper(HandleType handle) : VkWrapperBase<uint8_t*>(handle)
	{
		// Guard the 32-bit-safety invariant at compile time.
		static_assert(sizeof(VkWrapper) == sizeof(uint64_t), "Size is not 64 bits!");
	}

	// Returns the raw pointer advanced by rhs bytes (handle unchanged).
	HandleType operator+(ptrdiff_t rhs) const
	{
		HandleType base = get();
		return base + rhs;
	}

	// Advances the stored handle by rhs bytes, returning the new pointer.
	HandleType operator+=(ptrdiff_t rhs)
	{
		HandleType advanced = get() + rhs;
		return this->set(advanced);
	}

	// Byte distance from rhs up to this handle.
	ptrdiff_t operator-(const HandleType rhs) const
	{
		return get() - rhs;
	}
};
// Redefine Vulkan's non-dispatchable handle macro BEFORE including
// vulkan.h, so that every such handle becomes a 64-bit VkWrapper around
// the underlying pointer type instead of a bare pointer (b/129979580).
#define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
typedef struct object##_T *object##Ptr; \
typedef VkWrapper<object##Ptr> object;
#include <vulkan/vulkan.h>
// Platform headers pulled in via vulkan.h can define Bool/None as
// macros, which collide with identifiers elsewhere in the project —
// undo them here (b/127920555).
#ifdef Bool
#undef Bool // b/127920555
#undef None
#endif
#endif // VULKAN_PLATFORM
...@@ -224,6 +224,7 @@ IF EXIST "$(SolutionDir)..\deqp\build\external\vulkancts\modules\vulkan\" (copy ...@@ -224,6 +224,7 @@ IF EXIST "$(SolutionDir)..\deqp\build\external\vulkancts\modules\vulkan\" (copy
<ClInclude Include="VkSampler.hpp" /> <ClInclude Include="VkSampler.hpp" />
<ClInclude Include="VkSemaphore.hpp" /> <ClInclude Include="VkSemaphore.hpp" />
<ClInclude Include="VkShaderModule.hpp" /> <ClInclude Include="VkShaderModule.hpp" />
<ClInclude Include="VulkanPlatform.h" />
<ClInclude Include="..\Device\Blitter.hpp" /> <ClInclude Include="..\Device\Blitter.hpp" />
<ClInclude Include="..\Device\Clipper.hpp" /> <ClInclude Include="..\Device\Clipper.hpp" />
<ClInclude Include="..\Device\Color.hpp" /> <ClInclude Include="..\Device\Color.hpp" />
......
...@@ -518,6 +518,9 @@ ...@@ -518,6 +518,9 @@
<ClInclude Include="VkDestroy.h"> <ClInclude Include="VkDestroy.h">
<Filter>Header Files\Vulkan</Filter> <Filter>Header Files\Vulkan</Filter>
</ClInclude> </ClInclude>
<ClInclude Include="VulkanPlatform.h">
<Filter>Header Files\Vulkan</Filter>
</ClInclude>
<ClInclude Include="Version.h" /> <ClInclude Include="Version.h" />
<ClInclude Include="..\Pipeline\SpirvShader.hpp"> <ClInclude Include="..\Pipeline\SpirvShader.hpp">
<Filter>Header Files\Pipeline</Filter> <Filter>Header Files\Pipeline</Filter>
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#define SWIFTSHADER_VKSURFACEKHR_HPP_ #define SWIFTSHADER_VKSURFACEKHR_HPP_
#include "Vulkan/VkObject.hpp" #include "Vulkan/VkObject.hpp"
#include <vulkan/vulkan.h> #include <Vulkan/VulkanPlatform.h>
#include <vector> #include <vector>
namespace vk namespace vk
...@@ -42,7 +42,7 @@ class SurfaceKHR ...@@ -42,7 +42,7 @@ class SurfaceKHR
public: public:
operator VkSurfaceKHR() operator VkSurfaceKHR()
{ {
return reinterpret_cast<VkSurfaceKHR>(this); return reinterpret_cast<VkSurfaceKHR::HandleType>(this);
} }
void destroy(const VkAllocationCallbacks* pAllocator) void destroy(const VkAllocationCallbacks* pAllocator)
...@@ -85,7 +85,7 @@ private: ...@@ -85,7 +85,7 @@ private:
static inline SurfaceKHR* Cast(VkSurfaceKHR object) static inline SurfaceKHR* Cast(VkSurfaceKHR object)
{ {
return reinterpret_cast<SurfaceKHR*>(object); return reinterpret_cast<SurfaceKHR*>(object.get());
} }
} }
......
...@@ -56,7 +56,7 @@ private: ...@@ -56,7 +56,7 @@ private:
static inline SwapchainKHR* Cast(VkSwapchainKHR object) static inline SwapchainKHR* Cast(VkSwapchainKHR object)
{ {
return reinterpret_cast<SwapchainKHR*>(object); return reinterpret_cast<SwapchainKHR*>(object.get());
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment