Commit 78bcd2be by Jamie Madill, committed by Commit Bot

Vulkan: Fix deleting in-use descriptor sets.

Sequences of many frames with uniform updates could lead to a bug where we attempt to delete descriptor sets that are still in use. To trigger the bug we need to write enough uniform data to trigger a descriptor set to be freed. This would correctly trigger refreshing the descriptor sets in the Program. But if there was a second program idle in the background that had also allocated descriptors from the old pool, the bug would manifest.

Fix this by storing a shared handle to the descriptor pool in the Program. The dynamic descriptor pool won't recycle descriptor pools internally unless there are zero outstanding references to the pool. We could also improve this in a resource-sharing situation by keeping a single shared dynamic descriptor pool per share group.

Includes a contribution from tobine@google.com that adds a test to cover the bug.

Bug: angleproject:2863
Change-Id: Id585b85f33f8cfa3772ceff3af512d1e4fb0b75a
Reviewed-on: https://chromium-review.googlesource.com/c/1271919
Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org>
Reviewed-by: Jamie Madill <jmadill@chromium.org>
Commit-Queue: Jamie Madill <jmadill@chromium.org>
parent 1d8a783c
......@@ -920,7 +920,7 @@ Program::~Program()
void Program::onDestroy(const Context *context)
{
ASSERT(mLinkResolved);
resolveLink(context);
for (ShaderType shaderType : AllShaderTypes())
{
if (mState.mAttachedShaders[shaderType])
......
......@@ -159,6 +159,7 @@ void ContextVk::onDestroy(const gl::Context *context)
mDriverUniformsSetLayout.reset();
mIncompleteTextures.onDestroy(context);
mDriverUniformsBuffer.destroy(getDevice());
mDriverUniformsDescriptorPoolBinding.reset();
for (vk::DynamicDescriptorPool &descriptorPool : mDynamicDescriptorPools)
{
......@@ -188,21 +189,12 @@ gl::Error ContextVk::getIncompleteTexture(const gl::Context *context,
gl::Error ContextVk::initialize()
{
// Note that this may reserve more sets than strictly necessary for a particular layout.
VkDescriptorPoolSize uniformPoolSize = {
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
GetUniformBufferDescriptorCount() * vk::kDefaultDescriptorPoolMaxSets};
ANGLE_TRY(mDynamicDescriptorPools[kUniformsDescriptorSetIndex].init(this, uniformPoolSize));
VkDescriptorPoolSize imageSamplerPoolSize = {
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
mRenderer->getMaxActiveTextures() * vk::kDefaultDescriptorPoolMaxSets};
ANGLE_TRY(mDynamicDescriptorPools[kTextureDescriptorSetIndex].init(this, imageSamplerPoolSize));
VkDescriptorPoolSize driverUniformsPoolSize = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
vk::kDefaultDescriptorPoolMaxSets};
ANGLE_TRY(mDynamicDescriptorPools[kUniformsDescriptorSetIndex].init(
this, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, GetUniformBufferDescriptorCount()));
ANGLE_TRY(mDynamicDescriptorPools[kTextureDescriptorSetIndex].init(
this, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, mRenderer->getMaxActiveTextures()));
ANGLE_TRY(mDynamicDescriptorPools[kDriverUniformsDescriptorSetIndex].init(
this, driverUniformsPoolSize));
this, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1));
ANGLE_TRY(mQueryPools[gl::QueryType::AnySamples].init(this, VK_QUERY_TYPE_OCCLUSION,
vk::kDefaultOcclusionQueryPoolSize));
......@@ -1170,7 +1162,8 @@ angle::Result ContextVk::handleDirtyDriverUniforms(const gl::Context *context,
// Allocate a new descriptor set.
ANGLE_TRY(mDynamicDescriptorPools[kDriverUniformsDescriptorSetIndex].allocateSets(
this, mDriverUniformsSetLayout.get().ptr(), 1, &mDriverUniformsDescriptorSet));
this, mDriverUniformsSetLayout.get().ptr(), 1, &mDriverUniformsDescriptorPoolBinding,
&mDriverUniformsDescriptorSet));
// Update the driver uniform descriptor set.
VkDescriptorBufferInfo bufferInfo = {};
......
......@@ -255,6 +255,8 @@ class ContextVk : public ContextImpl, public vk::Context
// The descriptor pools are externally sychronized, so cannot be accessed from different
// threads simultaneously. Hence, we keep them in the ContextVk instead of the RendererVk.
// Note that this implementation would need to change in shared resource scenarios. Likely
// we'd instead share a single set of dynamic descriptor pools between the share groups.
// Same with query pools.
vk::DescriptorSetLayoutArray<vk::DynamicDescriptorPool> mDynamicDescriptorPools;
angle::PackedEnumMap<gl::QueryType, vk::DynamicQueryPool> mQueryPools;
......@@ -304,6 +306,7 @@ class ContextVk : public ContextImpl, public vk::Context
vk::DynamicBuffer mDriverUniformsBuffer;
VkDescriptorSet mDriverUniformsDescriptorSet;
vk::BindingPointer<vk::DescriptorSetLayout> mDriverUniformsSetLayout;
vk::SharedDescriptorPoolBinding mDriverUniformsDescriptorPoolBinding;
// This cache should also probably include the texture index (shader location) and array
// index (also in the shader). This info is used in the descriptor update step.
......
......@@ -231,6 +231,11 @@ angle::Result ProgramVk::reset(ContextVk *contextVk)
mDescriptorSets.clear();
mUsedDescriptorSetRange.invalidate();
for (vk::SharedDescriptorPoolBinding &binding : mDescriptorPoolBindings)
{
binding.reset();
}
return angle::Result::Continue();
}
......@@ -775,6 +780,7 @@ angle::Result ProgramVk::allocateDescriptorSet(ContextVk *contextVk, uint32_t de
const vk::DescriptorSetLayout &descriptorSetLayout =
mDescriptorSetLayouts[descriptorSetIndex].get();
ANGLE_TRY(dynamicDescriptorPool->allocateSets(contextVk, descriptorSetLayout.ptr(), 1,
&mDescriptorPoolBindings[descriptorSetIndex],
&mDescriptorSets[descriptorSetIndex]));
return angle::Result::Continue();
}
......
......@@ -174,6 +174,10 @@ class ProgramVk : public ProgramImpl
vk::BindingPointer<vk::PipelineLayout> mPipelineLayout;
vk::DescriptorSetLayoutPointerArray mDescriptorSetLayouts;
// Keep bindings to the descriptor pools. This ensures the pools stay valid while the Program
// is in use.
vk::DescriptorSetLayoutArray<vk::SharedDescriptorPoolBinding> mDescriptorPoolBindings;
class ShaderInfo final : angle::NonCopyable
{
public:
......
......@@ -22,82 +22,6 @@ namespace vk
{
class ImageHelper;
// This is a very simple RefCount class that has no autoreleasing. Used in the descriptor set and
// pipeline layout caches.
template <typename T>
class RefCounted : angle::NonCopyable
{
public:
RefCounted() : mRefCount(0) {}
explicit RefCounted(T &&newObject) : mRefCount(0), mObject(std::move(newObject)) {}
// Destruction requires that every addRef() was balanced by releaseRef() and that the
// wrapped object was already torn down.
~RefCounted() { ASSERT(mRefCount == 0 && !mObject.valid()); }
// Movable so it can live in containers; the moved-from instance drops to zero references
// so its own (asserting) destructor stays valid.
RefCounted(RefCounted &&copy) : mRefCount(copy.mRefCount), mObject(std::move(copy.mObject))
{
copy.mRefCount = 0;
}
// Move assignment swaps the counts (rather than zeroing) so the moved-from side keeps a
// count consistent with whatever object it now holds.
RefCounted &operator=(RefCounted &&rhs)
{
std::swap(mRefCount, rhs.mRefCount);
mObject = std::move(rhs.mObject);
return *this;
}
void addRef()
{
// Guard against counter overflow.
ASSERT(mRefCount != std::numeric_limits<uint32_t>::max());
mRefCount++;
}
void releaseRef()
{
// Releasing more times than addRef() was called is a bug.
ASSERT(isReferenced());
mRefCount--;
}
bool isReferenced() const { return mRefCount != 0; }
T &get() { return mObject; }
const T &get() const { return mObject; }
private:
uint32_t mRefCount;
T mObject;
};
// RAII-style handle onto a RefCounted<T>: set()/reset()/destruction keep the target's
// reference count balanced automatically, so holders cannot forget a releaseRef().
template <typename T>
class BindingPointer final : angle::NonCopyable
{
public:
BindingPointer() : mRefCounted(nullptr) {}
// Dropping the binding releases the reference, if any.
~BindingPointer() { reset(); }
// Rebinds to |refCounted| (may be nullptr). The old target, if any, is released before
// the new one is referenced.
void set(RefCounted<T> *refCounted)
{
if (mRefCounted)
{
mRefCounted->releaseRef();
}
mRefCounted = refCounted;
if (mRefCounted)
{
mRefCounted->addRef();
}
}
void reset() { set(nullptr); }
// Precondition: valid(). These dereference the binding without a null check.
T &get() { return mRefCounted->get(); }
const T &get() const { return mRefCounted->get(); }
bool valid() const { return mRefCounted != nullptr; }
private:
RefCounted<T> *mRefCounted;
};
using RenderPassAndSerial = ObjectAndSerial<RenderPass>;
using PipelineAndSerial = ObjectAndSerial<Pipeline>;
......
......@@ -23,6 +23,9 @@ constexpr VkBufferUsageFlags kLineLoopDynamicBufferUsage =
(VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
constexpr int kLineLoopDynamicBufferMinSize = 1024 * 1024;
// This is an arbitrary max. We can change this later if necessary.
constexpr uint32_t kDefaultDescriptorPoolMaxSets = 2048;
VkImageUsageFlags GetStagingImageUsageFlags(StagingUsage usage)
{
switch (usage)
......@@ -273,77 +276,141 @@ void DynamicBuffer::reset()
mLastFlushOrInvalidateOffset = 0;
}
// DescriptorPoolHelper implementation.
// Starts with zero capacity; init() must be called before any sets can be allocated.
DescriptorPoolHelper::DescriptorPoolHelper() : mFreeDescriptorSets(0)
{
}
DescriptorPoolHelper::~DescriptorPoolHelper() = default;
// Returns whether this pool still has room for |descriptorSetCount| more descriptor sets.
bool DescriptorPoolHelper::hasCapacity(uint32_t descriptorSetCount) const
{
return mFreeDescriptorSets >= descriptorSetCount;
}
// (Re)creates the underlying VkDescriptorPool with room for |maxSets| descriptor sets drawn
// from a single descriptor type/count described by |poolSize|.
angle::Result DescriptorPoolHelper::init(Context *context,
const VkDescriptorPoolSize &poolSize,
uint32_t maxSets)
{
if (mDescriptorPool.valid())
{
// This could be improved by recycling the descriptor pool.
// NOTE(review): destruction here is immediate — callers are expected to only re-init
// pools that are unreferenced and whose serial is no longer in use; confirm at call
// sites.
mDescriptorPool.destroy(context->getDevice());
}
VkDescriptorPoolCreateInfo descriptorPoolInfo = {};
descriptorPoolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
// flags == 0: individual sets are never freed, only the whole pool at once.
descriptorPoolInfo.flags = 0;
descriptorPoolInfo.maxSets = maxSets;
descriptorPoolInfo.poolSizeCount = 1u;
descriptorPoolInfo.pPoolSizes = &poolSize;
// Capacity bookkeeping counts descriptor *sets*, matching hasCapacity()/allocateSets().
mFreeDescriptorSets = maxSets;
return mDescriptorPool.init(context, descriptorPoolInfo);
}
// Destroys the Vulkan pool immediately. NOTE(review): callers gate this on reference counts
// and queue serials (see DynamicDescriptorPool) — verify the GPU is done before calling.
void DescriptorPoolHelper::destroy(VkDevice device)
{
mDescriptorPool.destroy(device);
}
// Carves |descriptorSetCount| descriptor sets out of this pool. Callers must check
// hasCapacity() first; the free-set bookkeeping is only asserted, not enforced.
angle::Result DescriptorPoolHelper::allocateSets(Context *context,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
VkDescriptorSet *descriptorSetsOut)
{
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = mDescriptorPool.getHandle();
allocInfo.descriptorSetCount = descriptorSetCount;
allocInfo.pSetLayouts = descriptorSetLayout;
ASSERT(mFreeDescriptorSets >= descriptorSetCount);
// Note: the count is decremented even if the Vulkan allocation below fails; that can only
// waste capacity, never over-allocate.
mFreeDescriptorSets -= descriptorSetCount;
return mDescriptorPool.allocateDescriptorSets(context, allocInfo, descriptorSetsOut);
}
// DynamicDescriptorPool implementation.
DynamicDescriptorPool::DynamicDescriptorPool()
: mMaxSetsPerPool(kDefaultDescriptorPoolMaxSets),
mCurrentSetsCount(0),
mPoolSize{},
mFreeDescriptorSets(0)
: mMaxSetsPerPool(kDefaultDescriptorPoolMaxSets), mCurrentPoolIndex(0), mPoolSize{}
{
}
DynamicDescriptorPool::~DynamicDescriptorPool() = default;
angle::Result DynamicDescriptorPool::init(Context *context, const VkDescriptorPoolSize &poolSize)
angle::Result DynamicDescriptorPool::init(ContextVk *contextVk,
VkDescriptorType descriptorType,
uint32_t descriptorsPerSet)
{
ASSERT(!mCurrentDescriptorPool.valid());
ASSERT(mCurrentPoolIndex == 0 && !mDescriptorPools[mCurrentPoolIndex].isReferenced());
mPoolSize = poolSize;
ANGLE_TRY(allocateNewPool(context));
return angle::Result::Continue();
mPoolSize.type = descriptorType;
mPoolSize.descriptorCount = descriptorsPerSet * mMaxSetsPerPool;
return mDescriptorPools[0].get().init(contextVk, mPoolSize, mMaxSetsPerPool);
}
void DynamicDescriptorPool::destroy(VkDevice device)
{
mCurrentDescriptorPool.destroy(device);
for (SharedDescriptorPoolHelper &pool : mDescriptorPools)
{
ASSERT(!pool.isReferenced());
pool.get().destroy(device);
}
}
angle::Result DynamicDescriptorPool::allocateSets(Context *context,
angle::Result DynamicDescriptorPool::allocateSets(ContextVk *contextVk,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
SharedDescriptorPoolBinding *bindingOut,
VkDescriptorSet *descriptorSetsOut)
{
if (mFreeDescriptorSets < descriptorSetCount || mCurrentSetsCount >= mMaxSetsPerPool)
if (!bindingOut->valid() || !bindingOut->get().hasCapacity(descriptorSetCount))
{
RendererVk *renderer = context->getRenderer();
Serial currentSerial = renderer->getCurrentQueueSerial();
// We will bust the limit of descriptor set with this allocation so we need to get a new
// pool for it.
renderer->releaseObject(currentSerial, &mCurrentDescriptorPool);
ANGLE_TRY(allocateNewPool(context));
}
if (!mDescriptorPools[mCurrentPoolIndex].get().hasCapacity(descriptorSetCount))
{
ANGLE_TRY(allocateNewPool(contextVk));
}
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = mCurrentDescriptorPool.getHandle();
allocInfo.descriptorSetCount = descriptorSetCount;
allocInfo.pSetLayouts = descriptorSetLayout;
// Make sure the old binding knows the descriptor sets can still be in-use. We only need
// to update the serial when we move to a new pool. This is because we only check serials
// when we move to a new pool.
if (bindingOut->valid())
{
Serial currentSerial = contextVk->getRenderer()->getCurrentQueueSerial();
bindingOut->get().updateSerial(currentSerial);
}
ANGLE_TRY(mCurrentDescriptorPool.allocateDescriptorSets(context, allocInfo, descriptorSetsOut));
bindingOut->set(&mDescriptorPools[mCurrentPoolIndex]);
}
ASSERT(mFreeDescriptorSets >= descriptorSetCount);
mFreeDescriptorSets -= descriptorSetCount;
mCurrentSetsCount++;
return angle::Result::Continue();
return bindingOut->get().allocateSets(contextVk, descriptorSetLayout, descriptorSetCount,
descriptorSetsOut);
}
angle::Result DynamicDescriptorPool::allocateNewPool(Context *context)
angle::Result DynamicDescriptorPool::allocateNewPool(ContextVk *contextVk)
{
VkDescriptorPoolCreateInfo descriptorPoolInfo = {};
descriptorPoolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
descriptorPoolInfo.flags = 0;
descriptorPoolInfo.maxSets = mMaxSetsPerPool;
RendererVk *renderer = contextVk->getRenderer();
// Reserve pools for uniform blocks and textures.
descriptorPoolInfo.poolSizeCount = 1u;
descriptorPoolInfo.pPoolSizes = &mPoolSize;
bool found = false;
mFreeDescriptorSets = mPoolSize.descriptorCount;
mCurrentSetsCount = 0;
for (size_t poolIndex = 0; poolIndex < kMaxInFlightPools; ++poolIndex)
{
if (!mDescriptorPools[poolIndex].isReferenced() &&
!renderer->isSerialInUse(mDescriptorPools[poolIndex].get().getSerial()))
{
// We cannot allocate a new pool if the current pool is available.
ASSERT(poolIndex != mCurrentPoolIndex);
mCurrentPoolIndex = poolIndex;
found = true;
break;
}
}
ANGLE_TRY(mCurrentDescriptorPool.init(context, descriptorPoolInfo));
return angle::Result::Continue();
ANGLE_VK_CHECK(contextVk, found, VK_ERROR_TOO_MANY_OBJECTS);
return mDescriptorPools[mCurrentPoolIndex].get().init(contextVk, mPoolSize, mMaxSetsPerPool);
}
void DynamicDescriptorPool::setMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool)
......
......@@ -92,8 +92,37 @@ class DynamicBuffer : angle::NonCopyable
// using the maximum number of descriptor sets and buffers with each allocation. Currently: 2
// (Vertex/Fragment) uniform buffers and 64 (MAX_ACTIVE_TEXTURES) image/samplers.
// This is an arbitrary max. We can change this later if necessary.
constexpr uint32_t kDefaultDescriptorPoolMaxSets = 2048;
// Shared handle to a descriptor pool. Each helper is allocated from the dynamic descriptor pool.
// Can be used to share descriptor pools between multiple ProgramVks and the ContextVk.
class DescriptorPoolHelper
{
  public:
    DescriptorPoolHelper();
    ~DescriptorPoolHelper();

    // True once init() has created the underlying VkDescriptorPool.
    // Fix: marked const — the accessor does not mutate the helper, matching the other
    // const accessors below.
    bool valid() const { return mDescriptorPool.valid(); }

    // Whether this pool still has at least |descriptorSetCount| unallocated sets.
    bool hasCapacity(uint32_t descriptorSetCount) const;
    // (Re)creates the pool with room for |maxSets| sets of the |poolSize| descriptor type.
    angle::Result init(Context *context, const VkDescriptorPoolSize &poolSize, uint32_t maxSets);
    void destroy(VkDevice device);

    // Allocates |descriptorSetCount| sets from this pool. Callers must check hasCapacity()
    // first.
    angle::Result allocateSets(Context *context,
                               const VkDescriptorSetLayout *descriptorSetLayout,
                               uint32_t descriptorSetCount,
                               VkDescriptorSet *descriptorSetsOut);

    // Tracks the most recent queue serial that used sets from this pool, so the pool is
    // only recycled once the GPU has finished with it.
    void updateSerial(Serial serial) { mMostRecentSerial = serial; }
    Serial getSerial() const { return mMostRecentSerial; }

  private:
    uint32_t mFreeDescriptorSets;
    DescriptorPool mDescriptorPool;
    Serial mMostRecentSerial;
};
using SharedDescriptorPoolHelper = RefCounted<DescriptorPoolHelper>;
using SharedDescriptorPoolBinding = BindingPointer<DescriptorPoolHelper>;
class DynamicDescriptorPool final : angle::NonCopyable
{
......@@ -102,27 +131,33 @@ class DynamicDescriptorPool final : angle::NonCopyable
~DynamicDescriptorPool();
// The DynamicDescriptorPool only handles one pool size at this time.
angle::Result init(Context *context, const VkDescriptorPoolSize &poolSize);
angle::Result init(ContextVk *contextVk,
VkDescriptorType descriptorType,
uint32_t descriptorsPerSet);
void destroy(VkDevice device);
// We use the descriptor type to help count the number of free sets.
// By convention, sets are indexed according to the constants in vk_cache_utils.h.
angle::Result allocateSets(Context *context,
angle::Result allocateSets(ContextVk *contextVk,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
SharedDescriptorPoolBinding *bindingOut,
VkDescriptorSet *descriptorSetsOut);
// For testing only!
void setMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);
private:
angle::Result allocateNewPool(Context *context);
angle::Result allocateNewPool(ContextVk *contextVk);
// This is somewhat fragile. It limits the total number of in-flight descriptors to
// kMaxInFlightPools * kDefaultDescriptorPoolMaxSets. Currently this is ~500k.
static constexpr size_t kMaxInFlightPools = 256;
uint32_t mMaxSetsPerPool;
uint32_t mCurrentSetsCount;
DescriptorPool mCurrentDescriptorPool;
size_t mCurrentPoolIndex;
std::array<SharedDescriptorPoolHelper, kMaxInFlightPools> mDescriptorPools;
VkDescriptorPoolSize mPoolSize;
uint32_t mFreeDescriptorSets;
};
// DynamicQueryPool allocates indices out of QueryPool as needed. Once a QueryPool is exhausted,
......
......@@ -756,6 +756,86 @@ class Scoped final : angle::NonCopyable
VkDevice mDevice;
T mVar;
};
// A bare-bones intrusive reference count wrapped around the object it guards. There is no
// automatic release: users must pair every addRef() with a releaseRef() (see BindingPointer
// for a RAII wrapper). Used in the descriptor set and pipeline layout caches.
template <typename T>
class RefCounted : angle::NonCopyable
{
  public:
    RefCounted() : mRefCount(0) {}
    explicit RefCounted(T &&object) : mRefCount(0), mObject(std::move(object)) {}

    // All references must be gone and the object torn down before destruction.
    ~RefCounted() { ASSERT(mRefCount == 0 && !mObject.valid()); }

    // Movable: the moved-from instance is left unreferenced so its destructor stays valid.
    RefCounted(RefCounted &&other)
        : mRefCount(std::exchange(other.mRefCount, 0)), mObject(std::move(other.mObject))
    {
    }

    // Swap the counts (rather than zeroing) so the moved-from side keeps a count that is
    // consistent with whatever object it now holds.
    RefCounted &operator=(RefCounted &&other)
    {
        std::swap(mRefCount, other.mRefCount);
        mObject = std::move(other.mObject);
        return *this;
    }

    void addRef()
    {
        // Guard against counter overflow.
        ASSERT(mRefCount != std::numeric_limits<uint32_t>::max());
        ++mRefCount;
    }

    void releaseRef()
    {
        // Releasing more often than addRef() was called is a bug.
        ASSERT(isReferenced());
        --mRefCount;
    }

    bool isReferenced() const { return mRefCount != 0; }

    T &get() { return mObject; }
    const T &get() const { return mObject; }

  private:
    uint32_t mRefCount;
    T mObject;
};
// Scoped handle onto a RefCounted<T>. Automatically balances the reference count on set(),
// reset(), and destruction, so holders cannot leak a reference.
template <typename T>
class BindingPointer final : angle::NonCopyable
{
  public:
    BindingPointer() : mRefCounted(nullptr) {}

    // Dropping the binding releases the held reference, if any.
    ~BindingPointer() { reset(); }

    // Rebinds to |refCounted| (may be nullptr). The previous target, if any, is released
    // before the new one is referenced.
    void set(RefCounted<T> *refCounted)
    {
        RefCounted<T> *previous = mRefCounted;
        mRefCounted             = refCounted;
        if (previous != nullptr)
        {
            previous->releaseRef();
        }
        if (mRefCounted != nullptr)
        {
            mRefCounted->addRef();
        }
    }

    void reset() { set(nullptr); }

    // Precondition: valid(). These dereference the binding without a null check.
    T &get() { return mRefCounted->get(); }
    const T &get() const { return mRefCounted->get(); }

    bool valid() const { return mRefCounted != nullptr; }

  private:
    RefCounted<T> *mRefCounted;
};
using SharedDescriptorPool = RefCounted<DescriptorPool>;
} // namespace vk
namespace gl_vk
......
......@@ -34,6 +34,34 @@ class VulkanUniformUpdatesTest : public ANGLETest
const gl::Context *context = static_cast<gl::Context *>(getEGLWindow()->getContext());
return rx::GetImplAs<rx::ContextVk>(context);
}
// Reaches through the GL front end to grab the Vulkan backend object for program |handle|,
// so the test can poke at implementation details (e.g. uniform buffer sizing).
rx::ProgramVk *hackProgram(GLuint handle) const
{
// Hack the angle!
const gl::Context *context = static_cast<gl::Context *>(getEGLWindow()->getContext());
const gl::Program *program = context->getProgramResolveLink(handle);
return rx::vk::GetImpl(program);
}
// Small cap so the tests below can exhaust a descriptor pool with only a few draws.
static constexpr uint32_t kMaxSetsForTesting = 32;
// Shrinks the per-pool set limit and re-initializes the uniform and texture descriptor
// pools so pool turnover happens quickly during the tests.
void limitMaxSets()
{
rx::ContextVk *contextVk = hackANGLE();
// Force a small limit on the max sets per pool to more easily trigger a new allocation.
rx::vk::DynamicDescriptorPool *uniformPool =
contextVk->getDynamicDescriptorPool(rx::kUniformsDescriptorSetIndex);
uniformPool->setMaxSetsPerPoolForTesting(kMaxSetsForTesting);
// (void): result deliberately discarded — TODO confirm init cannot meaningfully fail here.
(void)uniformPool->init(contextVk, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,
rx::GetUniformBufferDescriptorCount());
rx::vk::DynamicDescriptorPool *texturePool =
contextVk->getDynamicDescriptorPool(rx::kTextureDescriptorSetIndex);
texturePool->setMaxSetsPerPoolForTesting(kMaxSetsForTesting);
(void)texturePool->init(contextVk, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
contextVk->getRenderer()->getMaxActiveTextures());
}
};
// This test updates a uniform until a new buffer is allocated and then make sure the uniform
......@@ -42,6 +70,8 @@ TEST_P(VulkanUniformUpdatesTest, UpdateUntilNewBufferIsAllocated)
{
ASSERT_TRUE(IsVulkan());
limitMaxSets();
constexpr char kPositionUniformVertexShader[] = R"(attribute vec2 position;
uniform vec2 uniPosModifier;
void main()
......@@ -59,8 +89,7 @@ void main()
ANGLE_GL_PROGRAM(program, kPositionUniformVertexShader, kColorUniformFragmentShader);
glUseProgram(program);
const gl::State &state = hackANGLE()->getGLState();
rx::ProgramVk *programVk = rx::vk::GetImpl(state.getProgram());
rx::ProgramVk *programVk = hackProgram(program);
// Set a really small min size so that uniform updates often allocates a new buffer.
programVk->setDefaultUniformBlocksMinSizeForTesting(128);
......@@ -97,9 +126,7 @@ TEST_P(VulkanUniformUpdatesTest, DescriptorPoolUpdates)
ASSERT_TRUE(IsVulkan());
// Force a small limit on the max sets per pool to more easily trigger a new allocation.
constexpr uint32_t kMaxSetsForTesting = 32;
rx::vk::DynamicDescriptorPool *dynamicDescriptorPool = hackANGLE()->getDynamicDescriptorPool(0);
dynamicDescriptorPool->setMaxSetsPerPoolForTesting(kMaxSetsForTesting);
limitMaxSets();
// Initialize texture program.
GLuint program = get2DTexturedQuadProgram();
......@@ -133,14 +160,7 @@ TEST_P(VulkanUniformUpdatesTest, DescriptorPoolUniformAndTextureUpdates)
{
ASSERT_TRUE(IsVulkan());
// Force a small limit on the max sets per pool to more easily trigger a new allocation.
constexpr uint32_t kMaxSetsForTesting = 32;
rx::vk::DynamicDescriptorPool *uniformPool =
hackANGLE()->getDynamicDescriptorPool(rx::kUniformsDescriptorSetIndex);
uniformPool->setMaxSetsPerPoolForTesting(kMaxSetsForTesting);
rx::vk::DynamicDescriptorPool *texturePool =
hackANGLE()->getDynamicDescriptorPool(rx::kTextureDescriptorSetIndex);
texturePool->setMaxSetsPerPoolForTesting(kMaxSetsForTesting);
limitMaxSets();
// Initialize texture program.
constexpr char kVS[] = R"(attribute vec2 position;
......@@ -211,6 +231,85 @@ void main()
}
}
// Uniform updates across two programs sharing the descriptor pools. Verifies that churning
// descriptor sets with one program does not recycle a pool whose sets are still referenced
// by another idle program (angleproject:2863).
TEST_P(VulkanUniformUpdatesTest, DescriptorPoolUniformAndTextureUpdatesTwoShaders)
{
ASSERT_TRUE(IsVulkan());
// Force a small limit on the max sets per pool to more easily trigger a new allocation.
limitMaxSets();
// Initialize program.
constexpr char kVS[] = R"(attribute vec2 position;
varying mediump vec2 texCoord;
void main()
{
gl_Position = vec4(position, 0, 1);
texCoord = position * 0.5 + vec2(0.5);
})";
constexpr char kFS[] = R"(varying mediump vec2 texCoord;
uniform mediump vec4 colorMask;
void main()
{
gl_FragColor = colorMask;
})";
ANGLE_GL_PROGRAM(program1, kVS, kFS);
ANGLE_GL_PROGRAM(program2, kVS, kFS);
glUseProgram(program1);
rx::ProgramVk *program1Vk = hackProgram(program1);
rx::ProgramVk *program2Vk = hackProgram(program2);
// Set a really small min size so that uniform updates often allocates a new buffer.
program1Vk->setDefaultUniformBlocksMinSizeForTesting(128);
program2Vk->setDefaultUniformBlocksMinSizeForTesting(128);
// Get uniform locations.
GLint colorMaskLoc1 = glGetUniformLocation(program1, "colorMask");
ASSERT_NE(-1, colorMaskLoc1);
GLint colorMaskLoc2 = glGetUniformLocation(program2, "colorMask");
ASSERT_NE(-1, colorMaskLoc2);
// Draw with white using program1. This allocates a descriptor set out of the first pool.
glUniform4f(colorMaskLoc1, 1.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program1, "position", 0.5f, 1.0f, true);
swapBuffers();
ASSERT_GL_NO_ERROR();
// Now switch to use program2
glUseProgram(program2);
// Draw multiple times w/ program2, each iteration will create a new descriptor set.
// This will cause the first descriptor pool to be cleaned up
for (uint32_t iteration = 0; iteration < kMaxSetsForTesting * 2; ++iteration)
{
// Draw with white.
glUniform4f(colorMaskLoc2, 1.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program2, "position", 0.5f, 1.0f, true);
// Draw with red masked out.
glUniform4f(colorMaskLoc2, 0.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program2, "position", 0.5f, 1.0f, true);
// Draw with white again.
glUniform4f(colorMaskLoc2, 1.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program2, "position", 0.5f, 1.0f, true);
// And again with red masked out.
glUniform4f(colorMaskLoc2, 0.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program2, "position", 0.5f, 1.0f, true);
swapBuffers();
ASSERT_GL_NO_ERROR();
}
// Finally, attempt to draw again with program1, with original uniform values. If the pool
// backing program1's descriptor set was wrongly recycled, this draw would reference
// freed descriptors.
glUseProgram(program1);
drawQuad(program1, "position", 0.5f, 1.0f, true);
swapBuffers();
ASSERT_GL_NO_ERROR();
}
ANGLE_INSTANTIATE_TEST(VulkanUniformUpdatesTest, ES2_VULKAN());
} // anonymous namespace
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.