Commit f12e4123 by Tim Van Patten, committed by Commit Bot

Vulkan: Match descriptor pool sizes to descriptor set layouts

When a descriptor pool is created, a list of descriptor types and counts are given to vkCreateDescriptorPool(). Later, when allocating a descriptor set from the pool, we pass along a descriptor set layout to vkAllocateDescriptorSets() which is used to determine how many of each type of descriptor (i.e. binding) to allocate from the pool. In order for our "free descriptor set" counts to be accurate for each pool, the descriptor pools need to be created with descriptor counts that match the descriptor set layout binding counts. This change fixes a bug where the descriptor set layouts were created with more bindings than the descriptor pool sizes, causing the "free descriptor set" count to be inaccurate, leading to allocating too many descriptor sets from a pool. Bug: angleproject:3570 Test: VulkanDescriptorSetTest Change-Id: I660bf02d29a1291391fb15f39e6479bf348d0f83 Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/2391114 Commit-Queue: Tim Van Patten <timvp@google.com> Reviewed-by: Charlie Lao <cclao@google.com> Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org> Reviewed-by: Jamie Madill <jmadill@chromium.org>
parent 58463573
......@@ -3869,9 +3869,16 @@ angle::Result ContextVk::updateDriverUniformsDescriptorSet(
}
// Allocate a new descriptor set.
ANGLE_TRY(mDriverUniformsDescriptorPool.allocateSets(
bool newPoolAllocated;
ANGLE_TRY(mDriverUniformsDescriptorPool.allocateSetsAndGetInfo(
this, driverUniforms->descriptorSetLayout.get().ptr(), 1,
&driverUniforms->descriptorPoolBinding, &driverUniforms->descriptorSet));
&driverUniforms->descriptorPoolBinding, &driverUniforms->descriptorSet, &newPoolAllocated));
// Clear descriptor set cache. It may no longer be valid.
if (newPoolAllocated)
{
driverUniforms->descriptorSetCache.clear();
}
// Update the driver uniform descriptor set.
VkDescriptorBufferInfo &bufferInfo = allocDescriptorBufferInfo();
......@@ -4000,7 +4007,9 @@ angle::Result ContextVk::updateActiveTextures(const gl::Context *context)
if (haveImmutableSampler)
{
ANGLE_TRY(mExecutable->updatePipelineLayout(context, &mActiveTextures));
// TODO(http://anglebug.com/5033): This will recreate the descriptor pools each time, which
// will likely affect performance negatively.
ANGLE_TRY(mExecutable->createPipelineLayout(context, &mActiveTextures));
invalidateCurrentGraphicsPipeline();
}
......
......@@ -684,9 +684,70 @@ angle::Result ProgramExecutableVk::getComputePipeline(ContextVk *contextVk,
return shaderProgram->getComputePipeline(contextVk, getPipelineLayout(), pipelineOut);
}
// updatePipelineLayout is used to create the DescriptorSetLayout(s) and PipelineLayout and update
// them when we discover that an immutable sampler is in use.
angle::Result ProgramExecutableVk::updatePipelineLayout(
// Initializes the dynamic descriptor pool for one descriptor set index so that the pool's
// VkDescriptorPoolSize entries exactly mirror the bindings of the corresponding descriptor set
// layout. Keeping the pool sizes in sync with the layout bindings keeps the pool's "free
// descriptor set" accounting accurate (see angleproject:3570).
angle::Result ProgramExecutableVk::initDynamicDescriptorPools(
ContextVk *contextVk,
vk::DescriptorSetLayoutDesc &descriptorSetLayoutDesc,
DescriptorSetIndex descriptorSetIndex)
{
std::vector<VkDescriptorPoolSize> descriptorPoolSizes;
vk::DescriptorSetLayoutBindingVector bindingVector;
std::vector<VkSampler> immutableSamplers;
// Unpack the layout description into its individual bindings so each pool size can be derived
// from the binding's descriptor type and count.
descriptorSetLayoutDesc.unpackBindings(&bindingVector, &immutableSamplers);
// One VkDescriptorPoolSize per non-empty binding, with a matching descriptor count.
for (const VkDescriptorSetLayoutBinding &binding : bindingVector)
{
if (binding.descriptorCount > 0)
{
VkDescriptorPoolSize poolSize = {};
poolSize.type = binding.descriptorType;
poolSize.descriptorCount = binding.descriptorCount;
descriptorPoolSizes.emplace_back(poolSize);
}
}
RendererVk *renderer = contextVk->getRenderer();
if (renderer->getFeatures().bindEmptyForUnusedDescriptorSets.enabled &&
descriptorPoolSizes.empty())
{
// For this workaround, we have to create an empty descriptor set for each descriptor set
// index, so make sure their pools are initialized.
switch (descriptorSetIndex)
{
case DescriptorSetIndex::ShaderResource:
{
// Placeholder single uniform buffer so an empty ShaderResource set can be allocated.
VkDescriptorPoolSize poolSize = {};
poolSize.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSize.descriptorCount = 1;
descriptorPoolSizes.emplace_back(poolSize);
break;
}
case DescriptorSetIndex::Texture:
{
// Placeholder single combined image sampler for an empty Texture set.
VkDescriptorPoolSize poolSize = {};
poolSize.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
poolSize.descriptorCount = 1;
descriptorPoolSizes.emplace_back(poolSize);
break;
}
default:
// Other set indices (e.g. UniformsAndXfb) do not need the workaround placeholder.
break;
}
}
// Only initialize the pool when there is at least one descriptor to allocate; a layout with no
// bindings needs no pool.
if (!descriptorPoolSizes.empty())
{
ANGLE_TRY(mDynamicDescriptorPools[ToUnderlying(descriptorSetIndex)].init(
contextVk, descriptorPoolSizes.data(), descriptorPoolSizes.size()));
}
return angle::Result::Continue;
}
angle::Result ProgramExecutableVk::createPipelineLayout(
const gl::Context *glContext,
gl::ActiveTextureArray<vk::TextureUnit> *activeTextures)
{
......@@ -699,6 +760,15 @@ angle::Result ProgramExecutableVk::updatePipelineLayout(
gl::ShaderMap<const gl::ProgramState *> programStates;
fillProgramStateMap(contextVk, &programStates);
// If 'activeTextures' is null, this is creating a new pipeline layout, rather than re-creating
// one due to (for example) the addition of an immutable sampler. Creating a new pipeline layout
// requires a reset before proceeding to clean anything up from the last pipeline layout
// creation.
if (activeTextures == nullptr)
{
reset(contextVk);
}
// Store a reference to the pipeline and descriptor set layouts. This will create them if they
// don't already exist in the cache.
......@@ -742,14 +812,15 @@ angle::Result ProgramExecutableVk::updatePipelineLayout(
for (const gl::ShaderType shaderType : linkedShaderStages)
{
addInterfaceBlockDescriptorSetDesc(programStates[shaderType]->getUniformBlocks(),
shaderType, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
&resourcesSetDesc);
addInterfaceBlockDescriptorSetDesc(programStates[shaderType]->getShaderStorageBlocks(),
shaderType, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
&resourcesSetDesc);
addAtomicCounterBufferDescriptorSetDesc(
programStates[shaderType]->getAtomicCounterBuffers(), shaderType, &resourcesSetDesc);
const gl::ProgramState *programState = programStates[shaderType];
ASSERT(programState);
addInterfaceBlockDescriptorSetDesc(programState->getUniformBlocks(), shaderType,
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &resourcesSetDesc);
addInterfaceBlockDescriptorSetDesc(programState->getShaderStorageBlocks(), shaderType,
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resourcesSetDesc);
addAtomicCounterBufferDescriptorSetDesc(programState->getAtomicCounterBuffers(), shaderType,
&resourcesSetDesc);
}
for (const gl::ShaderType shaderType : linkedShaderStages)
......@@ -801,90 +872,12 @@ angle::Result ProgramExecutableVk::updatePipelineLayout(
ANGLE_TRY(renderer->getPipelineLayout(contextVk, pipelineLayoutDesc, mDescriptorSetLayouts,
&mPipelineLayout));
return angle::Result::Continue;
}
angle::Result ProgramExecutableVk::createPipelineLayout(const gl::Context *glContext)
{
ContextVk *contextVk = vk::GetImpl(glContext);
RendererVk *renderer = contextVk->getRenderer();
const gl::ProgramExecutable &glExecutable = getGlExecutable();
const gl::ShaderBitSet &linkedShaderStages = glExecutable.getLinkedShaderStages();
gl::ShaderMap<const gl::ProgramState *> programStates;
fillProgramStateMap(contextVk, &programStates);
reset(contextVk);
ANGLE_TRY(updatePipelineLayout(glContext, nullptr));
// Initialize descriptor pools.
std::array<VkDescriptorPoolSize, 2> uniformAndXfbSetSize = {
{{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
static_cast<uint32_t>(mNumDefaultUniformDescriptors)},
{VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, gl::IMPLEMENTATION_MAX_TRANSFORM_FEEDBACK_BUFFERS}}};
uint32_t uniformBlockCount = 0;
uint32_t storageBlockCount = 0;
uint32_t atomicCounterBufferCount = 0;
uint32_t imageCount = 0;
uint32_t textureCount = 0;
for (const gl::ShaderType shaderType : linkedShaderStages)
{
const gl::ProgramState *programState = programStates[shaderType];
ASSERT(programState);
// TODO(timvp): http://anglebug.com/3570: These counts will be too high for monolithic
// programs, since it's the same ProgramState for each shader type.
uniformBlockCount += static_cast<uint32_t>(programState->getUniformBlocks().size());
storageBlockCount += static_cast<uint32_t>(programState->getShaderStorageBlocks().size());
atomicCounterBufferCount +=
static_cast<uint32_t>(programState->getAtomicCounterBuffers().size());
imageCount += static_cast<uint32_t>(programState->getImageBindings().size());
textureCount += static_cast<uint32_t>(programState->getSamplerBindings().size());
}
if (renderer->getFeatures().bindEmptyForUnusedDescriptorSets.enabled)
{
// For this workaround, we have to create an empty descriptor set for each descriptor set
// index, so make sure their pools are initialized.
uniformBlockCount = std::max(uniformBlockCount, 1u);
textureCount = std::max(textureCount, 1u);
}
constexpr size_t kResourceTypesInResourcesSet = 3;
angle::FixedVector<VkDescriptorPoolSize, kResourceTypesInResourcesSet> resourceSetSize;
if (uniformBlockCount > 0)
{
resourceSetSize.emplace_back(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, uniformBlockCount);
}
if (storageBlockCount > 0 || atomicCounterBufferCount > 0)
{
// Note that we always use an array of IMPLEMENTATION_MAX_ATOMIC_COUNTER_BUFFERS storage
// buffers for emulating atomic counters, so if there are any atomic counter buffers, we
// need to allocate IMPLEMENTATION_MAX_ATOMIC_COUNTER_BUFFERS descriptors.
const uint32_t atomicCounterStorageBufferCount =
atomicCounterBufferCount > 0 ? gl::IMPLEMENTATION_MAX_ATOMIC_COUNTER_BUFFERS : 0;
const uint32_t storageBufferDescCount = storageBlockCount + atomicCounterStorageBufferCount;
resourceSetSize.emplace_back(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, storageBufferDescCount);
}
if (imageCount > 0)
{
resourceSetSize.emplace_back(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageCount);
}
VkDescriptorPoolSize textureSetSize = {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, textureCount};
ANGLE_TRY(mDynamicDescriptorPools[ToUnderlying(DescriptorSetIndex::UniformsAndXfb)].init(
contextVk, uniformAndXfbSetSize.data(), uniformAndXfbSetSize.size()));
if (resourceSetSize.size() > 0)
{
ANGLE_TRY(mDynamicDescriptorPools[ToUnderlying(DescriptorSetIndex::ShaderResource)].init(
contextVk, resourceSetSize.data(), static_cast<uint32_t>(resourceSetSize.size())));
}
if (textureCount > 0)
{
ANGLE_TRY(mDynamicDescriptorPools[ToUnderlying(DescriptorSetIndex::Texture)].init(
contextVk, &textureSetSize, 1));
}
ANGLE_TRY(initDynamicDescriptorPools(contextVk, uniformsAndXfbSetDesc,
DescriptorSetIndex::UniformsAndXfb));
ANGLE_TRY(initDynamicDescriptorPools(contextVk, resourcesSetDesc,
DescriptorSetIndex::ShaderResource));
ANGLE_TRY(initDynamicDescriptorPools(contextVk, texturesSetDesc, DescriptorSetIndex::Texture));
mDynamicBufferOffsets.resize(glExecutable.getLinkedShaderStageCount());
......@@ -967,21 +960,19 @@ void ProgramExecutableVk::updateDefaultUniformsDescriptorSet(
writeInfo.pTexelBufferView = nullptr;
}
void ProgramExecutableVk::updateBuffersDescriptorSet(ContextVk *contextVk,
const gl::ShaderType shaderType,
vk::ResourceUseList *resourceUseList,
vk::CommandBufferHelper *commandBufferHelper,
const std::vector<gl::InterfaceBlock> &blocks,
VkDescriptorType descriptorType)
angle::Result ProgramExecutableVk::updateBuffersDescriptorSet(
ContextVk *contextVk,
const gl::ShaderType shaderType,
vk::ResourceUseList *resourceUseList,
vk::CommandBufferHelper *commandBufferHelper,
const std::vector<gl::InterfaceBlock> &blocks,
VkDescriptorType descriptorType)
{
if (blocks.empty())
{
return;
return angle::Result::Continue;
}
VkDescriptorSet descriptorSet =
mDescriptorSets[ToUnderlying(DescriptorSetIndex::ShaderResource)];
ASSERT(descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
const bool isStorageBuffer = descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
......@@ -991,6 +982,9 @@ void ProgramExecutableVk::updateBuffersDescriptorSet(ContextVk *contextVk,
gl::IMPLEMENTATION_MAX_UNIFORM_BUFFER_BINDINGS,
"The descriptor arrays here would have inadequate size for uniform buffer objects");
VkDescriptorSet descriptorSet =
mDescriptorSets[ToUnderlying(DescriptorSetIndex::ShaderResource)];
// Write uniform or storage buffers.
const gl::State &glState = contextVk->getState();
for (uint32_t bufferIndex = 0; bufferIndex < blocks.size(); ++bufferIndex)
......@@ -1035,6 +1029,14 @@ void ProgramExecutableVk::updateBuffersDescriptorSet(ContextVk *contextVk,
BufferVk *bufferVk = vk::GetImpl(bufferBinding.get());
vk::BufferHelper &bufferHelper = bufferVk->getBuffer();
// Lazily allocate the descriptor set, since we may not need one if all of the buffers are
// inactive.
if (descriptorSet == VK_NULL_HANDLE)
{
ANGLE_TRY(allocateDescriptorSet(contextVk, DescriptorSetIndex::ShaderResource));
descriptorSet = mDescriptorSets[ToUnderlying(DescriptorSetIndex::ShaderResource)];
}
ASSERT(descriptorSet != VK_NULL_HANDLE);
WriteBufferDescriptorSetBinding(bufferHelper, bufferBinding.getOffset(), size,
descriptorSet, descriptorType, binding, arrayElement, 0,
&bufferInfo, &writeInfo);
......@@ -1053,9 +1055,11 @@ void ProgramExecutableVk::updateBuffersDescriptorSet(ContextVk *contextVk,
kPipelineStageShaderMap[shaderType], &bufferHelper);
}
}
return angle::Result::Continue;
}
void ProgramExecutableVk::updateAtomicCounterBuffersDescriptorSet(
angle::Result ProgramExecutableVk::updateAtomicCounterBuffersDescriptorSet(
const gl::ProgramState &programState,
const gl::ShaderType shaderType,
ContextVk *contextVk,
......@@ -1068,7 +1072,7 @@ void ProgramExecutableVk::updateAtomicCounterBuffersDescriptorSet(
if (atomicCounterBuffers.empty())
{
return;
return angle::Result::Continue;
}
VkDescriptorSet descriptorSet =
......@@ -1079,7 +1083,7 @@ void ProgramExecutableVk::updateAtomicCounterBuffersDescriptorSet(
if (!info.activeStages[shaderType])
{
return;
return angle::Result::Continue;
}
gl::AtomicCounterBufferMask writtenBindings;
......@@ -1107,6 +1111,14 @@ void ProgramExecutableVk::updateAtomicCounterBuffersDescriptorSet(
BufferVk *bufferVk = vk::GetImpl(bufferBinding.get());
vk::BufferHelper &bufferHelper = bufferVk->getBuffer();
// Lazily allocate the descriptor set, since we may not need one if all of the buffers are
// inactive.
if (descriptorSet == VK_NULL_HANDLE)
{
ANGLE_TRY(allocateDescriptorSet(contextVk, DescriptorSetIndex::ShaderResource));
descriptorSet = mDescriptorSets[ToUnderlying(DescriptorSetIndex::ShaderResource)];
}
ASSERT(descriptorSet != VK_NULL_HANDLE);
VkDeviceSize size = GetShaderBufferBindingSize(bufferBinding);
WriteBufferDescriptorSetBinding(bufferHelper, bufferBinding.getOffset(), size,
descriptorSet, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
......@@ -1121,6 +1133,8 @@ void ProgramExecutableVk::updateAtomicCounterBuffersDescriptorSet(
writtenBindings.set(binding);
}
ASSERT(descriptorSet != VK_NULL_HANDLE);
// Bind the empty buffer to every array slot that's unused.
vk::BufferHelper &emptyBuffer = contextVk->getEmptyBuffer();
emptyBuffer.retain(&contextVk->getResourceUseList());
......@@ -1146,6 +1160,8 @@ void ProgramExecutableVk::updateAtomicCounterBuffersDescriptorSet(
writeInfos[writeCount].pTexelBufferView = nullptr;
writeCount++;
}
return angle::Result::Continue;
}
angle::Result ProgramExecutableVk::updateImagesDescriptorSet(
......@@ -1225,6 +1241,15 @@ angle::Result ProgramExecutableVk::updateImagesDescriptorSet(
// TODO(syoussefi): Support image data reinterpretation by using binding.format.
// http://anglebug.com/3563
// Lazily allocate the descriptor set, since we may not need one if all of the image
// uniforms are inactive.
if (descriptorSet == VK_NULL_HANDLE)
{
ANGLE_TRY(allocateDescriptorSet(contextVk, DescriptorSetIndex::ShaderResource));
descriptorSet = mDescriptorSets[ToUnderlying(DescriptorSetIndex::ShaderResource)];
}
ASSERT(descriptorSet != VK_NULL_HANDLE);
imageInfos[arrayElement].sampler = VK_NULL_HANDLE;
imageInfos[arrayElement].imageView = imageView->getHandle();
imageInfos[arrayElement].imageLayout = image->getCurrentLayout();
......@@ -1261,21 +1286,23 @@ angle::Result ProgramExecutableVk::updateShaderResourcesDescriptorSet(
gl::ShaderMap<const gl::ProgramState *> programStates;
fillProgramStateMap(contextVk, &programStates);
ANGLE_TRY(allocateDescriptorSet(contextVk, DescriptorSetIndex::ShaderResource));
// Reset the descriptor set handles so we only allocate a new one when necessary.
mDescriptorSets[ToUnderlying(DescriptorSetIndex::ShaderResource)] = VK_NULL_HANDLE;
mEmptyDescriptorSets[ToUnderlying(DescriptorSetIndex::ShaderResource)] = VK_NULL_HANDLE;
for (const gl::ShaderType shaderType : executable->getLinkedShaderStages())
{
const gl::ProgramState *programState = programStates[shaderType];
ASSERT(programState);
updateBuffersDescriptorSet(contextVk, shaderType, resourceUseList, commandBufferHelper,
programState->getUniformBlocks(),
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
updateBuffersDescriptorSet(contextVk, shaderType, resourceUseList, commandBufferHelper,
programState->getShaderStorageBlocks(),
VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
updateAtomicCounterBuffersDescriptorSet(*programState, shaderType, contextVk,
resourceUseList, commandBufferHelper);
ANGLE_TRY(updateBuffersDescriptorSet(contextVk, shaderType, resourceUseList,
commandBufferHelper, programState->getUniformBlocks(),
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER));
ANGLE_TRY(updateBuffersDescriptorSet(
contextVk, shaderType, resourceUseList, commandBufferHelper,
programState->getShaderStorageBlocks(), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
ANGLE_TRY(updateAtomicCounterBuffersDescriptorSet(*programState, shaderType, contextVk,
resourceUseList, commandBufferHelper));
angle::Result status =
updateImagesDescriptorSet(programState->getExecutable(), shaderType, contextVk);
if (status != angle::Result::Continue)
......@@ -1366,17 +1393,7 @@ angle::Result ProgramExecutableVk::updateTexturesDescriptorSet(ContextVk *contex
return angle::Result::Continue;
}
bool newPoolAllocated;
ANGLE_TRY(
allocateDescriptorSetAndGetInfo(contextVk, DescriptorSetIndex::Texture, &newPoolAllocated));
// Clear descriptor set cache. It may no longer be valid.
if (newPoolAllocated)
{
mTextureDescriptorsCache.clear();
}
VkDescriptorSet descriptorSet = mDescriptorSets[ToUnderlying(DescriptorSetIndex::Texture)];
VkDescriptorSet descriptorSet = VK_NULL_HANDLE;
const gl::ActiveTextureArray<vk::TextureUnit> &activeTextures = contextVk->getActiveTextures();
......@@ -1457,6 +1474,25 @@ angle::Result ProgramExecutableVk::updateTexturesDescriptorSet(ContextVk *contex
: GlslangGetMappedSamplerName(samplerUniform.name);
ShaderInterfaceVariableInfo &info = variableInfoMap[samplerName];
// Lazily allocate the descriptor set, since we may not need one if all of the
// sampler uniforms are inactive.
if (descriptorSet == VK_NULL_HANDLE)
{
bool newPoolAllocated;
ANGLE_TRY(allocateDescriptorSetAndGetInfo(
contextVk, DescriptorSetIndex::Texture, &newPoolAllocated));
// Clear descriptor set cache. It may no longer be valid.
if (newPoolAllocated)
{
mTextureDescriptorsCache.clear();
}
descriptorSet = mDescriptorSets[ToUnderlying(DescriptorSetIndex::Texture)];
mTextureDescriptorsCache.emplace(texturesDesc, descriptorSet);
}
ASSERT(descriptorSet != VK_NULL_HANDLE);
writeInfos[arrayElement].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
writeInfos[arrayElement].pNext = nullptr;
writeInfos[arrayElement].dstSet = descriptorSet;
......@@ -1471,8 +1507,6 @@ angle::Result ProgramExecutableVk::updateTexturesDescriptorSet(ContextVk *contex
}
}
mTextureDescriptorsCache.emplace(texturesDesc, descriptorSet);
return angle::Result::Continue;
}
......
......@@ -139,8 +139,7 @@ class ProgramExecutableVk
angle::Result getComputePipeline(ContextVk *contextVk, vk::PipelineAndSerial **pipelineOut);
const vk::PipelineLayout &getPipelineLayout() const { return mPipelineLayout.get(); }
angle::Result createPipelineLayout(const gl::Context *glContext);
angle::Result updatePipelineLayout(const gl::Context *glContext,
angle::Result createPipelineLayout(const gl::Context *glContext,
gl::ActiveTextureArray<vk::TextureUnit> *activeTextures);
angle::Result updateTexturesDescriptorSet(ContextVk *contextVk);
......@@ -205,20 +204,24 @@ class ProgramExecutableVk
ContextVk *contextVk);
void updateTransformFeedbackDescriptorSetImpl(const gl::ProgramState &programState,
ContextVk *contextVk);
void updateBuffersDescriptorSet(ContextVk *contextVk,
const gl::ShaderType shaderType,
vk::ResourceUseList *resourceUseList,
vk::CommandBufferHelper *commandBufferHelper,
const std::vector<gl::InterfaceBlock> &blocks,
VkDescriptorType descriptorType);
void updateAtomicCounterBuffersDescriptorSet(const gl::ProgramState &programState,
const gl::ShaderType shaderType,
ContextVk *contextVk,
vk::ResourceUseList *resourceUseList,
vk::CommandBufferHelper *commandBufferHelper);
angle::Result updateBuffersDescriptorSet(ContextVk *contextVk,
const gl::ShaderType shaderType,
vk::ResourceUseList *resourceUseList,
vk::CommandBufferHelper *commandBufferHelper,
const std::vector<gl::InterfaceBlock> &blocks,
VkDescriptorType descriptorType);
angle::Result updateAtomicCounterBuffersDescriptorSet(
const gl::ProgramState &programState,
const gl::ShaderType shaderType,
ContextVk *contextVk,
vk::ResourceUseList *resourceUseList,
vk::CommandBufferHelper *commandBufferHelper);
angle::Result updateImagesDescriptorSet(const gl::ProgramExecutable &executable,
const gl::ShaderType shaderType,
ContextVk *contextVk);
angle::Result initDynamicDescriptorPools(ContextVk *contextVk,
vk::DescriptorSetLayoutDesc &descriptorSetLayoutDesc,
DescriptorSetIndex descriptorSetIndex);
// Descriptor sets for uniform blocks and textures for this program.
vk::DescriptorSetLayoutArray<VkDescriptorSet> mDescriptorSets;
......
......@@ -92,7 +92,7 @@ angle::Result ProgramPipelineVk::link(const gl::Context *glContext,
mExecutable.resolvePrecisionMismatch(mergedVaryings);
}
return mExecutable.createPipelineLayout(glContext);
return mExecutable.createPipelineLayout(glContext, nullptr);
}
size_t ProgramPipelineVk::calcUniformUpdateRequiredSpace(
......
......@@ -199,7 +199,7 @@ std::unique_ptr<rx::LinkEvent> ProgramVk::load(const gl::Context *context,
return std::make_unique<LinkEventDone>(status);
}
status = mExecutable.createPipelineLayout(context);
status = mExecutable.createPipelineLayout(context, nullptr);
return std::make_unique<LinkEventDone>(status);
}
......@@ -295,7 +295,7 @@ std::unique_ptr<LinkEvent> ProgramVk::link(const gl::Context *context,
// TODO(jie.a.chen@intel.com): Parallelize linking.
// http://crbug.com/849576
status = mExecutable.createPipelineLayout(context);
status = mExecutable.createPipelineLayout(context, nullptr);
return std::make_unique<LinkEventDone>(status);
}
......
......@@ -44,9 +44,6 @@ constexpr VkBufferUsageFlags kLineLoopDynamicIndirectBufferUsage =
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
constexpr int kLineLoopDynamicIndirectBufferInitialSize = sizeof(VkDrawIndirectCommand) * 16;
// This is an arbitrary max. We can change this later if necessary.
constexpr uint32_t kDefaultDescriptorPoolMaxSets = 128;
constexpr angle::PackedEnumMap<PipelineStage, VkPipelineStageFlagBits> kPipelineStageFlagBitMap = {
{PipelineStage::TopOfPipe, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT},
{PipelineStage::DrawIndirect, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT},
......@@ -564,6 +561,9 @@ bool IsShaderReadOnlyLayout(const ImageMemoryBarrierData &imageLayout)
}
} // anonymous namespace
// This is an arbitrary max. We can change this later if necessary.
uint32_t DynamicDescriptorPool::mMaxSetsPerPool = 128;
VkImageLayout ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout)
{
return kImageMemoryBarrierData[imageLayout].layout;
......@@ -1680,14 +1680,14 @@ bool DescriptorPoolHelper::hasCapacity(uint32_t descriptorSetCount) const
return mFreeDescriptorSets >= descriptorSetCount;
}
angle::Result DescriptorPoolHelper::init(Context *context,
angle::Result DescriptorPoolHelper::init(ContextVk *contextVk,
const std::vector<VkDescriptorPoolSize> &poolSizes,
uint32_t maxSets)
{
if (mDescriptorPool.valid())
{
// This could be improved by recycling the descriptor pool.
mDescriptorPool.destroy(context->getDevice());
mDescriptorPool.destroy(contextVk->getDevice());
}
VkDescriptorPoolCreateInfo descriptorPoolInfo = {};
......@@ -1699,7 +1699,8 @@ angle::Result DescriptorPoolHelper::init(Context *context,
mFreeDescriptorSets = maxSets;
ANGLE_VK_TRY(context, mDescriptorPool.init(context->getDevice(), descriptorPoolInfo));
ANGLE_VK_TRY(contextVk, mDescriptorPool.init(contextVk->getDevice(), descriptorPoolInfo));
return angle::Result::Continue;
}
......@@ -1729,23 +1730,25 @@ angle::Result DescriptorPoolHelper::allocateSets(ContextVk *contextVk,
ANGLE_VK_TRY(contextVk, mDescriptorPool.allocateDescriptorSets(contextVk->getDevice(),
allocInfo, descriptorSetsOut));
return angle::Result::Continue;
}
// DynamicDescriptorPool implementation.
DynamicDescriptorPool::DynamicDescriptorPool()
: mMaxSetsPerPool(kDefaultDescriptorPoolMaxSets), mCurrentPoolIndex(0)
{}
DynamicDescriptorPool::DynamicDescriptorPool() : mCurrentPoolIndex(0) {}
DynamicDescriptorPool::~DynamicDescriptorPool() = default;
angle::Result DynamicDescriptorPool::init(ContextVk *contextVk,
const VkDescriptorPoolSize *setSizes,
uint32_t setSizeCount)
size_t setSizeCount)
{
ASSERT(setSizes);
ASSERT(setSizeCount);
ASSERT(mCurrentPoolIndex == 0);
ASSERT(mDescriptorPools.empty() || (mDescriptorPools.size() == 1 &&
mDescriptorPools[0]->get().hasCapacity(mMaxSetsPerPool)));
ASSERT(mDescriptorPools.empty() ||
(mDescriptorPools.size() == 1 &&
mDescriptorPools[mCurrentPoolIndex]->get().hasCapacity(mMaxSetsPerPool)));
mPoolSizes.assign(setSizes, setSizes + setSizeCount);
for (uint32_t i = 0; i < setSizeCount; ++i)
......@@ -1754,7 +1757,8 @@ angle::Result DynamicDescriptorPool::init(ContextVk *contextVk,
}
mDescriptorPools.push_back(new RefCountedDescriptorPoolHelper());
return mDescriptorPools[0]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
mCurrentPoolIndex = mDescriptorPools.size() - 1;
return mDescriptorPools[mCurrentPoolIndex]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
}
void DynamicDescriptorPool::destroy(VkDevice device)
......@@ -1789,6 +1793,8 @@ angle::Result DynamicDescriptorPool::allocateSetsAndGetInfo(
VkDescriptorSet *descriptorSetsOut,
bool *newPoolAllocatedOut)
{
ASSERT(!mDescriptorPools.empty());
*newPoolAllocatedOut = false;
if (!bindingOut->valid() || !bindingOut->get().hasCapacity(descriptorSetCount))
......@@ -1842,7 +1848,12 @@ angle::Result DynamicDescriptorPool::allocateNewPool(ContextVk *contextVk)
return mDescriptorPools[mCurrentPoolIndex]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
}
void DynamicDescriptorPool::setMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool)
// Test-only accessor: returns the current class-wide max-sets-per-pool limit so tests can save
// and later restore it around a temporary override.
uint32_t DynamicDescriptorPool::GetMaxSetsPerPoolForTesting()
{
return mMaxSetsPerPool;
}
// Test-only mutator: overrides the class-wide max-sets-per-pool limit (static, so it affects
// every DynamicDescriptorPool) to make pool exhaustion easy to trigger in tests.
void DynamicDescriptorPool::SetMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool)
{
mMaxSetsPerPool = maxSetsPerPool;
}
......
......@@ -252,7 +252,7 @@ class DescriptorPoolHelper
bool valid() { return mDescriptorPool.valid(); }
bool hasCapacity(uint32_t descriptorSetCount) const;
angle::Result init(Context *context,
angle::Result init(ContextVk *contextVk,
const std::vector<VkDescriptorPoolSize> &poolSizes,
uint32_t maxSets);
void destroy(VkDevice device);
......@@ -287,7 +287,7 @@ class DynamicDescriptorPool final : angle::NonCopyable
// an individual set. The pool size will be calculated accordingly.
angle::Result init(ContextVk *contextVk,
const VkDescriptorPoolSize *setSizes,
uint32_t setSizeCount);
size_t setSizeCount);
void destroy(VkDevice device);
void release(ContextVk *contextVk);
......@@ -314,12 +314,13 @@ class DynamicDescriptorPool final : angle::NonCopyable
bool *newPoolAllocatedOut);
// For testing only!
void setMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);
static uint32_t GetMaxSetsPerPoolForTesting();
static void SetMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);
private:
angle::Result allocateNewPool(ContextVk *contextVk);
uint32_t mMaxSetsPerPool;
static uint32_t mMaxSetsPerPool;
size_t mCurrentPoolIndex;
std::vector<RefCountedDescriptorPoolHelper *> mDescriptorPools;
std::vector<VkDescriptorPoolSize> mPoolSizes;
......
......@@ -18,6 +18,7 @@ angle_white_box_tests_win_sources = [
"gl_tests/ErrorMessages.cpp",
]
angle_white_box_tests_vulkan_sources = [
"gl_tests/VulkanDescriptorSetTest.cpp",
"gl_tests/VulkanFormatTablesTest.cpp",
"gl_tests/VulkanFramebufferTest.cpp",
"gl_tests/VulkanPerformanceCounterTest.cpp",
......
//
// Copyright 2020 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// VulkanDescriptorSetTest:
// Various tests related for Vulkan descriptor sets.
//
#include "test_utils/ANGLETest.h"
#include "test_utils/gl_raii.h"
#include "libANGLE/Context.h"
#include "libANGLE/angletypes.h"
#include "libANGLE/renderer/vulkan/ContextVk.h"
#include "libANGLE/renderer/vulkan/ProgramVk.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"
using namespace angle;
namespace
{
// Test fixture for Vulkan descriptor set allocation behavior. It saves the global
// max-sets-per-pool limit in setup and restores it in teardown, so individual tests can shrink
// the limit (via limitMaxSets()) without leaking the override into later tests.
class VulkanDescriptorSetTest : public ANGLETest
{
protected:
VulkanDescriptorSetTest() {}
void testSetUp() override
{
// Remember the original limit so teardown can restore it.
mMaxSetsPerPool = rx::vk::DynamicDescriptorPool::GetMaxSetsPerPoolForTesting();
}
void testTearDown() override
{
// Restore the limit saved in testSetUp().
rx::vk::DynamicDescriptorPool::SetMaxSetsPerPoolForTesting(mMaxSetsPerPool);
}
// One set per pool: the smallest limit, forcing a new pool allocation on every set.
static constexpr uint32_t kMaxSetsForTesting = 1;
void limitMaxSets()
{
rx::vk::DynamicDescriptorPool::SetMaxSetsPerPoolForTesting(kMaxSetsForTesting);
}
private:
// Saved pre-test limit, restored by testTearDown().
uint32_t mMaxSetsPerPool;
};
// Test atomic counter read.
// Test atomic counter read while the descriptor pool size is limited to a single set, so each
// draw forces allocation of a fresh descriptor pool. Repeated draws verify that descriptor sets
// are not over-allocated from an exhausted pool (angleproject:3570).
TEST_P(VulkanDescriptorSetTest, AtomicCounterReadLimitedDescriptorPool)
{
// Skipping due to a bug on the Qualcomm Vulkan Android driver.
// http://anglebug.com/3726
ANGLE_SKIP_TEST_IF(IsAndroid() && IsVulkan());
// Skipping test while we work on enabling atomic counter buffer support in th D3D renderer.
// http://anglebug.com/1729
ANGLE_SKIP_TEST_IF(IsD3D11());
// Must be before program creation to limit the descriptor pool sizes when creating the pipeline
// layout.
limitMaxSets();
// Fragment shader reads the atomic counter and outputs white iff its value is 3u.
constexpr char kFS[] =
"#version 310 es\n"
"precision highp float;\n"
"layout(binding = 0, offset = 4) uniform atomic_uint ac;\n"
"out highp vec4 my_color;\n"
"void main()\n"
"{\n"
" my_color = vec4(0.0);\n"
" uint a1 = atomicCounter(ac);\n"
" if (a1 == 3u) my_color = vec4(1.0);\n"
"}\n";
ANGLE_GL_PROGRAM(program, essl31_shaders::vs::Simple(), kFS);
glUseProgram(program.get());
// The initial value of counter 'ac' is 3u.
unsigned int bufferData[3] = {11u, 3u, 1u};
GLBuffer atomicCounterBuffer;
glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, atomicCounterBuffer);
glBindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, atomicCounterBuffer);
// Draw several times; with kMaxSetsForTesting == 1 each iteration exercises a new pool
// allocation, and every draw must still read the counter correctly.
for (int i = 0; i < 5; ++i)
{
glBufferData(GL_ATOMIC_COUNTER_BUFFER, sizeof(bufferData), bufferData, GL_STATIC_DRAW);
drawQuad(program.get(), essl31_shaders::PositionAttrib(), 0.0f);
ASSERT_GL_NO_ERROR();
EXPECT_PIXEL_COLOR_EQ(0, 0, GLColor::white);
}
}
ANGLE_INSTANTIATE_TEST(VulkanDescriptorSetTest, ES31_VULKAN(), ES31_VULKAN_SWIFTSHADER());
} // namespace
\ No newline at end of file
......@@ -39,21 +39,20 @@ class VulkanUniformUpdatesTest : public ANGLETest
// tests. This is to ensure that the assumption that each TEST_P will recreate context.
ASSERT(mLastContext != getEGLWindow()->getContext());
mLastContext = getEGLWindow()->getContext();
mMaxSetsPerPool = rx::vk::DynamicDescriptorPool::GetMaxSetsPerPoolForTesting();
}
rx::ContextVk *hackANGLE() const
void testTearDown() override
{
// Hack the angle!
const gl::Context *context = static_cast<gl::Context *>(getEGLWindow()->getContext());
return rx::GetImplAs<rx::ContextVk>(context);
rx::vk::DynamicDescriptorPool::SetMaxSetsPerPoolForTesting(mMaxSetsPerPool);
}
rx::ProgramVk *hackProgram(GLuint handle) const
rx::ContextVk *hackANGLE() const
{
// Hack the angle!
const gl::Context *context = static_cast<gl::Context *>(getEGLWindow()->getContext());
const gl::Program *program = context->getProgramResolveLink({handle});
return rx::vk::GetImpl(program);
return rx::GetImplAs<rx::ContextVk>(context);
}
rx::TextureVk *hackTexture(GLuint handle) const
......@@ -64,33 +63,11 @@ class VulkanUniformUpdatesTest : public ANGLETest
return rx::vk::GetImpl(texture);
}
static constexpr uint32_t kMaxSetsForTesting = 32;
static constexpr uint32_t kMaxSetsForTesting = 1;
void limitMaxSets(GLuint program)
void limitMaxSets()
{
rx::ContextVk *contextVk = hackANGLE();
rx::ProgramVk *programVk = hackProgram(program);
// Force a small limit on the max sets per pool to more easily trigger a new allocation.
rx::vk::DynamicDescriptorPool *uniformPool = programVk->getDynamicDescriptorPool(
ToUnderlying(rx::DescriptorSetIndex::UniformsAndXfb));
uniformPool->setMaxSetsPerPoolForTesting(kMaxSetsForTesting);
VkDescriptorPoolSize uniformSetSize = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
rx::kReservedDefaultUniformBindingCount};
(void)uniformPool->init(contextVk, &uniformSetSize, 1);
uint32_t textureCount =
static_cast<uint32_t>(programVk->getState().getSamplerBindings().size());
// To support the bindEmptyForUnusedDescriptorSets workaround.
textureCount = std::max(textureCount, 1u);
rx::vk::DynamicDescriptorPool *texturePool =
programVk->getDynamicDescriptorPool(ToUnderlying(rx::DescriptorSetIndex::Texture));
texturePool->setMaxSetsPerPoolForTesting(kMaxSetsForTesting);
VkDescriptorPoolSize textureSetSize = {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
textureCount};
(void)texturePool->init(contextVk, &textureSetSize, 1);
rx::vk::DynamicDescriptorPool::SetMaxSetsPerPoolForTesting(kMaxSetsForTesting);
}
static constexpr size_t kTextureStagingBufferSizeForTesting = 128;
......@@ -103,6 +80,7 @@ class VulkanUniformUpdatesTest : public ANGLETest
private:
EGLContext mLastContext;
uint32_t mMaxSetsPerPool;
};
// This test updates a uniform until a new buffer is allocated and then make sure the uniform
......@@ -128,7 +106,7 @@ void main()
ANGLE_GL_PROGRAM(program, kPositionUniformVertexShader, kColorUniformFragmentShader);
glUseProgram(program);
limitMaxSets(program);
limitMaxSets();
// Set a really small min size so that uniform updates often allocates a new buffer.
rx::ContextVk *contextVk = hackANGLE();
......@@ -171,7 +149,7 @@ TEST_P(VulkanUniformUpdatesTest, DescriptorPoolUpdates)
glUseProgram(program);
// Force a small limit on the max sets per pool to more easily trigger a new allocation.
limitMaxSets(program);
limitMaxSets();
GLint texLoc = glGetUniformLocation(program, "tex");
ASSERT_NE(-1, texLoc);
......@@ -212,7 +190,7 @@ void main()
ANGLE_GL_PROGRAM(program, essl1_shaders::vs::Texture2D(), kFS);
glUseProgram(program);
limitMaxSets(program);
limitMaxSets();
// Get uniform locations.
GLint texLoc = glGetUniformLocation(program, "tex");
......@@ -280,7 +258,7 @@ void main()
ANGLE_GL_PROGRAM(program, essl1_shaders::vs::Texture2D(), kFS);
glUseProgram(program);
limitMaxSets(program);
limitMaxSets();
// Initialize large arrays of textures.
std::vector<GLTexture> whiteTextures;
......@@ -369,8 +347,8 @@ void main()
glUseProgram(program1);
// Force a small limit on the max sets per pool to more easily trigger a new allocation.
limitMaxSets(program1);
limitMaxSets(program2);
limitMaxSets();
limitMaxSets();
// Set a really small min size so that uniform updates often allocates a new buffer.
rx::ContextVk *contextVk = hackANGLE();
......@@ -513,7 +491,7 @@ void main()
ANGLE_GL_PROGRAM(program, kPositionUniformVertexShader, kColorUniformFragmentShader);
glUseProgram(program);
limitMaxSets(program);
limitMaxSets();
// Set a really small min size so that every uniform update actually allocates a new buffer.
rx::ContextVk *contextVk = hackANGLE();
......@@ -613,7 +591,7 @@ void main()
EXPECT_GL_NO_ERROR();
glUseProgram(program);
limitMaxSets(program);
limitMaxSets();
// Set a really small min size so that every uniform update actually allocates a new buffer.
rx::ContextVk *contextVk = hackANGLE();
contextVk->setDefaultUniformBlocksMinSizeForTesting(128);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment