Commit c30f45d3 by Shahbaz Youssefi Committed by Commit Bot

Vulkan: Rename PipelineDesc/Cache to Graphics*

PipelineDesc describes a Vertex-Fragment pipeline and PipelineCache (not to be confused with vk::PipelineCache) implements a cache of such pipeline objects. In preparation for Compute support, these data structures are prefixed with Graphics.

Bug: angleproject:2959
Change-Id: I9181586fb946b787216ca0b2ad6340f90c3ab55f
Reviewed-on: https://chromium-review.googlesource.com/c/1333971
Reviewed-by: Jamie Madill <jmadill@chromium.org>
Commit-Queue: Shahbaz Youssefi <syoussefi@chromium.org>
parent d9ee8bf9
......@@ -267,7 +267,7 @@ class ContextVk : public ContextImpl, public vk::Context
// Keep a cached pipeline description structure that can be used to query the pipeline cache.
// Kept in a pointer so allocations can be aligned, and structs can be portably packed.
std::unique_ptr<vk::PipelineDesc> mPipelineDesc;
std::unique_ptr<vk::GraphicsPipelineDesc> mGraphicsPipelineDesc;
// The descriptor pools are externally synchronized, so cannot be accessed from different
// threads simultaneously. Hence, we keep them in the ContextVk instead of the RendererVk.
......
......@@ -1043,7 +1043,7 @@ angle::Result FramebufferVk::clearWithDraw(ContextVk *contextVk,
bool invertViewport = contextVk->isViewportFlipEnabledForDrawFBO();
// This pipeline desc could be cached.
vk::PipelineDesc pipelineDesc;
vk::GraphicsPipelineDesc pipelineDesc;
pipelineDesc.initDefaults();
pipelineDesc.updateColorWriteMask(colorMaskFlags, getEmulatedAlphaAttachmentMask());
pipelineDesc.updateRenderPassDesc(getRenderPassDesc());
......
......@@ -330,7 +330,7 @@ void RendererVk::onDestroy(vk::Context *context)
mDescriptorSetLayoutCache.destroy(mDevice);
mRenderPassCache.destroy(mDevice);
mPipelineCache.destroy(mDevice);
mGraphicsPipelineCache.destroy(mDevice);
mPipelineCacheVk.destroy(mDevice);
mSubmitSemaphorePool.destroy(mDevice);
mShaderLibrary.destroy(mDevice);
......@@ -1143,7 +1143,7 @@ angle::Result RendererVk::getPipeline(vk::Context *context,
const vk::ShaderAndSerial &vertexShader,
const vk::ShaderAndSerial &fragmentShader,
const vk::PipelineLayout &pipelineLayout,
const vk::PipelineDesc &pipelineDesc,
const vk::GraphicsPipelineDesc &pipelineDesc,
const gl::AttributesMask &activeAttribLocationsMask,
vk::PipelineAndSerial **pipelineOut)
{
......@@ -1157,9 +1157,9 @@ angle::Result RendererVk::getPipeline(vk::Context *context,
ANGLE_TRY(
getCompatibleRenderPass(context, pipelineDesc.getRenderPassDesc(), &compatibleRenderPass));
return mPipelineCache.getPipeline(context, mPipelineCacheVk, *compatibleRenderPass,
pipelineLayout, activeAttribLocationsMask, vertexShader.get(),
fragmentShader.get(), pipelineDesc, pipelineOut);
return mGraphicsPipelineCache.getPipeline(
context, mPipelineCacheVk, *compatibleRenderPass, pipelineLayout, activeAttribLocationsMask,
vertexShader.get(), fragmentShader.get(), pipelineDesc, pipelineOut);
}
angle::Result RendererVk::getDescriptorSetLayout(
......
......@@ -136,7 +136,7 @@ class RendererVk : angle::NonCopyable
const vk::ShaderAndSerial &vertexShader,
const vk::ShaderAndSerial &fragmentShader,
const vk::PipelineLayout &pipelineLayout,
const vk::PipelineDesc &pipelineDesc,
const vk::GraphicsPipelineDesc &pipelineDesc,
const gl::AttributesMask &activeAttribLocationsMask,
vk::PipelineAndSerial **pipelineOut);
......@@ -269,7 +269,7 @@ class RendererVk : angle::NonCopyable
vk::FormatTable mFormatTable;
RenderPassCache mRenderPassCache;
PipelineCache mPipelineCache;
GraphicsPipelineCache mGraphicsPipelineCache;
vk::PipelineCache mPipelineCacheVk;
egl::BlobCache::Key mPipelineCacheVkBlobKey;
......
......@@ -367,7 +367,7 @@ angle::Result VertexArrayVk::syncDirtyAttrib(ContextVk *contextVk,
return angle::Result::Continue();
}
void VertexArrayVk::getPackedInputDescriptions(vk::PipelineDesc *pipelineDesc)
void VertexArrayVk::getPackedInputDescriptions(vk::GraphicsPipelineDesc *pipelineDesc)
{
updatePackedInputDescriptions();
pipelineDesc->updateVertexInputInfo(mPackedInputBindings, mPackedInputAttributes);
......
......@@ -36,7 +36,7 @@ class VertexArrayVk : public VertexArrayImpl
const gl::VertexArray::DirtyAttribBitsArray &attribBits,
const gl::VertexArray::DirtyBindingBitsArray &bindingBits) override;
void getPackedInputDescriptions(vk::PipelineDesc *pipelineDesc);
void getPackedInputDescriptions(vk::GraphicsPipelineDesc *pipelineDesc);
void updateDefaultAttrib(RendererVk *renderer,
size_t attribIndex,
......
......@@ -243,20 +243,20 @@ constexpr size_t kShaderStageInfoSize = sizeof(ShaderStageInfo);
constexpr size_t kVertexInputBindingsSize = sizeof(VertexInputBindings);
constexpr size_t kVertexInputAttributesSize = sizeof(VertexInputAttributes);
class PipelineDesc final
class GraphicsPipelineDesc final
{
public:
// Use aligned allocation and free so we can use the alignas keyword.
void *operator new(std::size_t size);
void operator delete(void *ptr);
PipelineDesc();
~PipelineDesc();
PipelineDesc(const PipelineDesc &other);
PipelineDesc &operator=(const PipelineDesc &other);
GraphicsPipelineDesc();
~GraphicsPipelineDesc();
GraphicsPipelineDesc(const GraphicsPipelineDesc &other);
GraphicsPipelineDesc &operator=(const GraphicsPipelineDesc &other);
size_t hash() const;
bool operator==(const PipelineDesc &other) const;
bool operator==(const GraphicsPipelineDesc &other) const;
void initDefaults();
......@@ -334,13 +334,13 @@ class PipelineDesc final
// This is not guaranteed by the spec, but is validated by a compile-time check.
// No gaps or padding at the end ensures that hashing and memcmp checks will not run
// into uninitialized memory regions.
constexpr size_t kPipelineDescSumOfSizes =
constexpr size_t kGraphicsPipelineDescSumOfSizes =
kShaderStageInfoSize + kVertexInputBindingsSize + kVertexInputAttributesSize +
kPackedInputAssemblyAndColorBlendStateSize + kPackedRasterizationAndMultisampleStateSize +
kPackedDepthStencilStateSize + kRenderPassDescSize;
static constexpr size_t kPipelineDescSize = sizeof(PipelineDesc);
static_assert(kPipelineDescSize == kPipelineDescSumOfSizes, "Size mismatch");
static constexpr size_t kGraphicsPipelineDescSize = sizeof(GraphicsPipelineDesc);
static_assert(kGraphicsPipelineDescSize == kGraphicsPipelineDescSumOfSizes, "Size mismatch");
constexpr uint32_t kMaxDescriptorSetLayoutBindings = gl::IMPLEMENTATION_MAX_ACTIVE_TEXTURES;
......@@ -348,8 +348,8 @@ using DescriptorSetLayoutBindingVector =
angle::FixedVector<VkDescriptorSetLayoutBinding, kMaxDescriptorSetLayoutBindings>;
// A packed description of a descriptor set layout. Use similarly to RenderPassDesc and
// PipelineDesc. Currently we only need to differentiate layouts based on sampler usage. In the
// future we could generalize this.
// GraphicsPipelineDesc. Currently we only need to differentiate layouts based on sampler usage. In
// the future we could generalize this.
class DescriptorSetLayoutDesc final
{
public:
......@@ -454,9 +454,9 @@ struct hash<rx::vk::AttachmentOpsArray>
};
template <>
struct hash<rx::vk::PipelineDesc>
struct hash<rx::vk::GraphicsPipelineDesc>
{
size_t operator()(const rx::vk::PipelineDesc &key) const { return key.hash(); }
size_t operator()(const rx::vk::GraphicsPipelineDesc &key) const { return key.hash(); }
};
template <>
......@@ -503,15 +503,15 @@ class RenderPassCache final : angle::NonCopyable
};
// TODO(jmadill): Add cache trimming/eviction.
class PipelineCache final : angle::NonCopyable
class GraphicsPipelineCache final : angle::NonCopyable
{
public:
PipelineCache();
~PipelineCache();
GraphicsPipelineCache();
~GraphicsPipelineCache();
void destroy(VkDevice device);
void populate(const vk::PipelineDesc &desc, vk::Pipeline &&pipeline);
void populate(const vk::GraphicsPipelineDesc &desc, vk::Pipeline &&pipeline);
angle::Result getPipeline(vk::Context *context,
const vk::PipelineCache &pipelineCacheVk,
const vk::RenderPass &compatibleRenderPass,
......@@ -519,11 +519,11 @@ class PipelineCache final : angle::NonCopyable
const gl::AttributesMask &activeAttribLocationsMask,
const vk::ShaderModule &vertexModule,
const vk::ShaderModule &fragmentModule,
const vk::PipelineDesc &desc,
const vk::GraphicsPipelineDesc &desc,
vk::PipelineAndSerial **pipelineOut);
private:
std::unordered_map<vk::PipelineDesc, vk::PipelineAndSerial> mPayload;
std::unordered_map<vk::GraphicsPipelineDesc, vk::PipelineAndSerial> mPayload;
};
class DescriptorSetLayoutCache final : angle::NonCopyable
......
......@@ -26,15 +26,15 @@ class VulkanPipelineCachePerfTest : public ANGLEPerfTest
void SetUp() override;
void step() override;
PipelineCache mCache;
GraphicsPipelineCache mCache;
angle::RNG mRNG;
std::vector<vk::PipelineDesc> mCacheHits;
std::vector<vk::PipelineDesc> mCacheMisses;
std::vector<vk::GraphicsPipelineDesc> mCacheHits;
std::vector<vk::GraphicsPipelineDesc> mCacheMisses;
size_t mMissIndex = 0;
private:
void randomizeDesc(vk::PipelineDesc *desc);
void randomizeDesc(vk::GraphicsPipelineDesc *desc);
};
VulkanPipelineCachePerfTest::VulkanPipelineCachePerfTest()
......@@ -53,7 +53,7 @@ void VulkanPipelineCachePerfTest::SetUp()
for (int pipelineCount = 0; pipelineCount < 100; ++pipelineCount)
{
vk::Pipeline pipeline;
vk::PipelineDesc desc;
vk::GraphicsPipelineDesc desc;
randomizeDesc(&desc);
if (pipelineCount < 10)
......@@ -65,17 +65,17 @@ void VulkanPipelineCachePerfTest::SetUp()
for (int missCount = 0; missCount < 10000; ++missCount)
{
vk::PipelineDesc desc;
vk::GraphicsPipelineDesc desc;
randomizeDesc(&desc);
mCacheMisses.push_back(desc);
}
}
void VulkanPipelineCachePerfTest::randomizeDesc(vk::PipelineDesc *desc)
void VulkanPipelineCachePerfTest::randomizeDesc(vk::GraphicsPipelineDesc *desc)
{
std::vector<uint8_t> bytes(sizeof(vk::PipelineDesc));
std::vector<uint8_t> bytes(sizeof(vk::GraphicsPipelineDesc));
FillVectorWithRandomUBytes(&mRNG, &bytes);
memcpy(desc, bytes.data(), sizeof(vk::PipelineDesc));
memcpy(desc, bytes.data(), sizeof(vk::GraphicsPipelineDesc));
}
void VulkanPipelineCachePerfTest::step()
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment