Commit 88596bea by Jamie Madill Committed by Commit Bot

Vulkan: Implement a texture descriptor cache.

We noticed a significant hotspot in vkAllocateDescriptorSets. The app was repeatedly cycling through a few combinations of active textures. For each state change in ANGLE we were allocating a new descriptor set. This in turn would trigger internal driver memory allocation and cause jank. Using a cache avoids allocations entirely since the application is rotating through a stable set of textures. The descriptor cache is stored in each program. It is indexed by a set of 32-bit serials. Each texture generates a unique serial for every combination of VkImage and VkSampler that the texture owns. The texture descriptor is refreshed every time a texture changes or is rebound. The descriptor cache is accessed via an unordered map with the texture serial sets as the hash key. We also store the maximum active texture index in the cache key so we don't need to hash and memcmp on all 64 active textures. This will currently fail if more than UINT32_MAX serials are generated, but that number is high enough that it shouldn't be possible to hit in practice. Requires shifting the texture sync to ContextVk so we can get the new serial after the textures are updated. And to make sure to update the image layouts even if the descriptors are not dirty. Improves performance of the T-Rex demo. Also improves the score of the texture state change microbenchmark by about 40%. Bug: angleproject:3117 Change-Id: Ieb9bec1e8c1a7619814afab767a1980b959a8241 Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/1642226 Reviewed-by: Jamie Madill <jmadill@chromium.org> Commit-Queue: Jamie Madill <jmadill@chromium.org>
parent 95428331
......@@ -211,6 +211,7 @@ ContextVk::ContextVk(const gl::State &state, gl::ErrorSet *errorSet, RendererVk
mDirtyBitHandlers[DIRTY_BIT_DESCRIPTOR_SETS] = &ContextVk::handleDirtyDescriptorSets;
mDirtyBits = mNewCommandBufferDirtyBits;
mActiveTextures.fill(nullptr);
}
#undef INIT
......@@ -1999,7 +2000,9 @@ angle::Result ContextVk::updateActiveTextures(const gl::Context *context)
const gl::State &glState = mState;
const gl::Program *program = glState.getProgram();
mActiveTextures.fill(nullptr);
uint32_t prevMaxIndex = mActiveTexturesDesc.getMaxIndex();
memset(mActiveTextures.data(), 0, sizeof(mActiveTextures[0]) * prevMaxIndex);
mActiveTexturesDesc.reset();
const gl::ActiveTexturePointerArray &textures = glState.getActiveTexturesCache();
const gl::ActiveTextureMask &activeTextures = program->getActiveSamplersMask();
......@@ -2016,7 +2019,29 @@ angle::Result ContextVk::updateActiveTextures(const gl::Context *context)
ANGLE_TRY(getIncompleteTexture(context, textureType, &texture));
}
mActiveTextures[textureUnit] = vk::GetImpl(texture);
TextureVk *textureVk = vk::GetImpl(texture);
// Ensure any writes to the textures are flushed before we read from them.
ANGLE_TRY(textureVk->ensureImageInitialized(this));
vk::ImageHelper &image = textureVk->getImage();
// Ensure the image is in read-only layout
if (image.isLayoutChangeNecessary(vk::ImageLayout::FragmentShaderReadOnly))
{
vk::CommandBuffer *srcLayoutChange;
ANGLE_TRY(image.recordCommands(this, &srcLayoutChange));
VkImageAspectFlags aspectFlags = image.getAspectFlags();
ASSERT(aspectFlags != 0);
image.changeLayout(aspectFlags, vk::ImageLayout::FragmentShaderReadOnly,
srcLayoutChange);
}
image.addReadDependency(mDrawFramebuffer->getFramebuffer());
mActiveTextures[textureUnit] = textureVk;
mActiveTexturesDesc.update(textureUnit, textureVk->getSerial());
}
return angle::Result::Continue;
......
......@@ -301,6 +301,13 @@ class ContextVk : public ContextImpl, public vk::Context, public vk::CommandBuff
vk::DescriptorSetLayoutDesc getDriverUniformsDescriptorSetDesc() const;
// We use texture serials to optimize texture binding updates. Each permutation of a
// {VkImage/VkSampler} generates a unique serial. These serials are combined to form a unique
// signature for each descriptor set. This allows us to keep a cache of descriptor sets and
// avoid calling vkAllocateDescriptorSets on each texture update.
Serial generateTextureSerial() { return mTextureSerialFactory.generate(); }
const vk::TextureDescriptorDesc &getActiveTexturesDesc() const { return mActiveTexturesDesc; }
private:
// Dirty bits.
enum DirtyBitType : size_t
......@@ -464,6 +471,7 @@ class ContextVk : public ContextImpl, public vk::Context, public vk::CommandBuff
// This cache should also probably include the texture index (shader location) and array
// index (also in the shader). This info is used in the descriptor update step.
gl::ActiveTextureArray<TextureVk *> mActiveTextures;
vk::TextureDescriptorDesc mActiveTexturesDesc;
// "Current Value" aka default vertex attribute state.
gl::AttributesMask mDirtyDefaultAttribsMask;
......@@ -568,6 +576,9 @@ class ContextVk : public ContextImpl, public vk::Context, public vk::CommandBuff
// have a value close to zero, to avoid losing 12 bits when converting these 64 bit values to
// double.
uint64_t mGpuEventTimestampOrigin;
// Generator for texture serials.
SerialFactory mTextureSerialFactory;
};
} // namespace rx
......
......@@ -261,6 +261,8 @@ void ProgramVk::reset(ContextVk *contextVk)
{
descriptorPool.release(contextVk);
}
mTextureDescriptorsCache.clear();
}
std::unique_ptr<rx::LinkEvent> ProgramVk::load(const gl::Context *context,
......@@ -779,6 +781,14 @@ void ProgramVk::setPathFragmentInputGen(const std::string &inputName,
angle::Result ProgramVk::allocateDescriptorSet(ContextVk *contextVk, uint32_t descriptorSetIndex)
{
bool ignoreNewPoolAllocated;
return allocateDescriptorSetAndGetInfo(contextVk, descriptorSetIndex, &ignoreNewPoolAllocated);
}
angle::Result ProgramVk::allocateDescriptorSetAndGetInfo(ContextVk *contextVk,
uint32_t descriptorSetIndex,
bool *newPoolAllocatedOut)
{
vk::DynamicDescriptorPool &dynamicDescriptorPool = mDynamicDescriptorPools[descriptorSetIndex];
uint32_t potentialNewCount = descriptorSetIndex + 1;
......@@ -789,9 +799,9 @@ angle::Result ProgramVk::allocateDescriptorSet(ContextVk *contextVk, uint32_t de
const vk::DescriptorSetLayout &descriptorSetLayout =
mDescriptorSetLayouts[descriptorSetIndex].get();
ANGLE_TRY(dynamicDescriptorPool.allocateSets(contextVk, descriptorSetLayout.ptr(), 1,
&mDescriptorPoolBindings[descriptorSetIndex],
&mDescriptorSets[descriptorSetIndex]));
ANGLE_TRY(dynamicDescriptorPool.allocateSetsAndGetInfo(
contextVk, descriptorSetLayout.ptr(), 1, &mDescriptorPoolBindings[descriptorSetIndex],
&mDescriptorSets[descriptorSetIndex], newPoolAllocatedOut));
mEmptyDescriptorSets[descriptorSetIndex] = VK_NULL_HANDLE;
return angle::Result::Continue;
......@@ -982,8 +992,25 @@ angle::Result ProgramVk::updateUniformBuffersDescriptorSet(ContextVk *contextVk,
angle::Result ProgramVk::updateTexturesDescriptorSet(ContextVk *contextVk,
vk::FramebufferHelper *framebuffer)
{
const vk::TextureDescriptorDesc &texturesDesc = contextVk->getActiveTexturesDesc();
auto iter = mTextureDescriptorsCache.find(texturesDesc);
if (iter != mTextureDescriptorsCache.end())
{
mDescriptorSets[kTextureDescriptorSetIndex] = iter->second;
return angle::Result::Continue;
}
ASSERT(hasTextures());
ANGLE_TRY(allocateDescriptorSet(contextVk, kTextureDescriptorSetIndex));
bool newPoolAllocated;
ANGLE_TRY(
allocateDescriptorSetAndGetInfo(contextVk, kTextureDescriptorSetIndex, &newPoolAllocated));
// Clear descriptor set cache. It may no longer be valid.
if (newPoolAllocated)
{
mTextureDescriptorsCache.clear();
}
VkDescriptorSet descriptorSet = mDescriptorSets[kTextureDescriptorSetIndex];
......@@ -1006,24 +1033,8 @@ angle::Result ProgramVk::updateTexturesDescriptorSet(ContextVk *contextVk,
GLuint textureUnit = samplerBinding.boundTextureUnits[arrayElement];
TextureVk *textureVk = activeTextures[textureUnit];
// Ensure any writes to the textures are flushed before we read from them.
ANGLE_TRY(textureVk->ensureImageInitialized(contextVk));
vk::ImageHelper &image = textureVk->getImage();
// Ensure the image is in read-only layout
if (image.isLayoutChangeNecessary(vk::ImageLayout::FragmentShaderReadOnly))
{
vk::CommandBuffer *srcLayoutChange;
ANGLE_TRY(image.recordCommands(contextVk, &srcLayoutChange));
VkImageAspectFlags aspectFlags = image.getAspectFlags();
ASSERT(aspectFlags != 0);
image.changeLayout(aspectFlags, vk::ImageLayout::FragmentShaderReadOnly,
srcLayoutChange);
}
image.addReadDependency(framebuffer);
VkDescriptorImageInfo &imageInfo = descriptorImageInfo[writeCount];
imageInfo.sampler = textureVk->getSampler().getHandle();
......@@ -1052,6 +1063,8 @@ angle::Result ProgramVk::updateTexturesDescriptorSet(ContextVk *contextVk,
ASSERT(writeCount > 0);
vkUpdateDescriptorSets(device, writeCount, writeDescriptorInfo.data(), 0, nullptr);
mTextureDescriptorsCache.emplace(texturesDesc, descriptorSet);
return angle::Result::Continue;
}
......
......@@ -154,6 +154,9 @@ class ProgramVk : public ProgramImpl
void reset(ContextVk *contextVk);
angle::Result allocateDescriptorSet(ContextVk *contextVk, uint32_t descriptorSetIndex);
angle::Result allocateDescriptorSetAndGetInfo(ContextVk *contextVk,
uint32_t descriptorSetIndex,
bool *newPoolAllocatedOut);
angle::Result initDefaultUniformBlocks(const gl::Context *glContext);
angle::Result updateDefaultUniformsDescriptorSet(ContextVk *contextVk);
......@@ -227,6 +230,8 @@ class ProgramVk : public ProgramImpl
std::vector<VkDescriptorSet> mDescriptorSets;
vk::DescriptorSetLayoutArray<VkDescriptorSet> mEmptyDescriptorSets;
std::unordered_map<vk::TextureDescriptorDesc, VkDescriptorSet> mTextureDescriptorsCache;
// We keep a reference to the pipeline and descriptor set layouts. This ensures they don't get
// deleted while this program is in use.
vk::BindingPointer<vk::PipelineLayout> mPipelineLayout;
......
......@@ -836,21 +836,21 @@ void TextureVk::releaseAndDeleteImage(ContextVk *context)
}
}
angle::Result TextureVk::ensureImageAllocated(ContextVk *context, const vk::Format &format)
angle::Result TextureVk::ensureImageAllocated(ContextVk *contextVk, const vk::Format &format)
{
if (mImage == nullptr)
{
setImageHelper(context, new vk::ImageHelper(), mState.getType(), format, 0, 0, true);
setImageHelper(contextVk, new vk::ImageHelper(), mState.getType(), format, 0, 0, true);
}
else
{
updateImageHelper(context, format);
updateImageHelper(contextVk, format);
}
return angle::Result::Continue;
}
void TextureVk::setImageHelper(ContextVk *context,
void TextureVk::setImageHelper(ContextVk *contextVk,
vk::ImageHelper *imageHelper,
gl::TextureType imageType,
const vk::Format &format,
......@@ -865,19 +865,21 @@ void TextureVk::setImageHelper(ContextVk *context,
mImageLevelOffset = imageLevelOffset;
mImageLayerOffset = imageLayerOffset;
mImage = imageHelper;
mImage->initStagingBuffer(context->getRenderer(), format);
mImage->initStagingBuffer(contextVk->getRenderer(), format);
mRenderTarget.init(mImage, &mDrawBaseLevelImageView, &mFetchBaseLevelImageView,
getNativeImageLevel(0), getNativeImageLayer(0));
// Force re-creation of cube map render targets next time they are needed
mCubeMapRenderTargets.clear();
mSerial = contextVk->generateTextureSerial();
}
void TextureVk::updateImageHelper(ContextVk *context, const vk::Format &format)
void TextureVk::updateImageHelper(ContextVk *contextVk, const vk::Format &format)
{
ASSERT(mImage != nullptr);
mImage->initStagingBuffer(context->getRenderer(), format);
mImage->initStagingBuffer(contextVk->getRenderer(), format);
}
angle::Result TextureVk::redefineImage(const gl::Context *context,
......@@ -1211,6 +1213,10 @@ angle::Result TextureVk::syncState(const gl::Context *context,
samplerInfo.unnormalizedCoordinates = VK_FALSE;
ANGLE_VK_TRY(contextVk, mSampler.init(contextVk->getDevice(), samplerInfo));
// Regenerate the serial on a sampler change.
mSerial = contextVk->generateTextureSerial();
return angle::Result::Continue;
}
......@@ -1363,6 +1369,8 @@ angle::Result TextureVk::initImage(ContextVk *contextVk,
}
}
mSerial = contextVk->generateTextureSerial();
return angle::Result::Continue;
}
......@@ -1414,13 +1422,13 @@ angle::Result TextureVk::initImageViews(ContextVk *contextVk,
return angle::Result::Continue;
}
void TextureVk::releaseImage(ContextVk *context)
void TextureVk::releaseImage(ContextVk *contextVk)
{
if (mImage)
{
if (mOwnsImage)
{
mImage->releaseImage(context);
mImage->releaseImage(contextVk);
}
else
{
......@@ -1428,25 +1436,25 @@ void TextureVk::releaseImage(ContextVk *context)
}
}
Serial currentSerial = context->getCurrentQueueSerial();
Serial currentSerial = contextVk->getCurrentQueueSerial();
context->releaseObject(currentSerial, &mDrawBaseLevelImageView);
context->releaseObject(currentSerial, &mReadBaseLevelImageView);
context->releaseObject(currentSerial, &mReadMipmapImageView);
context->releaseObject(currentSerial, &mFetchBaseLevelImageView);
context->releaseObject(currentSerial, &mFetchMipmapImageView);
contextVk->releaseObject(currentSerial, &mDrawBaseLevelImageView);
contextVk->releaseObject(currentSerial, &mReadBaseLevelImageView);
contextVk->releaseObject(currentSerial, &mReadMipmapImageView);
contextVk->releaseObject(currentSerial, &mFetchBaseLevelImageView);
contextVk->releaseObject(currentSerial, &mFetchMipmapImageView);
for (auto &layerViews : mLayerLevelDrawImageViews)
{
for (vk::ImageView &imageView : layerViews)
{
context->releaseObject(currentSerial, &imageView);
contextVk->releaseObject(currentSerial, &imageView);
}
}
mLayerLevelDrawImageViews.clear();
for (vk::ImageView &imageView : mLayerFetchImageView)
{
context->releaseObject(currentSerial, &imageView);
contextVk->releaseObject(currentSerial, &imageView);
}
mLayerFetchImageView.clear();
mCubeMapRenderTargets.clear();
......
......@@ -163,6 +163,8 @@ class TextureVk : public TextureImpl
angle::Result ensureImageInitialized(ContextVk *contextVk);
Serial getSerial() const { return mSerial; }
private:
// Transform an image index from the frontend into one that can be used on the backing
// ImageHelper, taking into account mipmap or cube face offsets
......@@ -170,9 +172,9 @@ class TextureVk : public TextureImpl
uint32_t getNativeImageLevel(uint32_t frontendLevel) const;
uint32_t getNativeImageLayer(uint32_t frontendLayer) const;
void releaseAndDeleteImage(ContextVk *context);
angle::Result ensureImageAllocated(ContextVk *context, const vk::Format &format);
void setImageHelper(ContextVk *context,
void releaseAndDeleteImage(ContextVk *contextVk);
angle::Result ensureImageAllocated(ContextVk *contextVk, const vk::Format &format);
void setImageHelper(ContextVk *contextVk,
vk::ImageHelper *imageHelper,
gl::TextureType imageType,
const vk::Format &format,
......@@ -302,6 +304,9 @@ class TextureVk : public TextureImpl
RenderTargetVk mRenderTarget;
std::vector<vk::ImageView> mLayerFetchImageView;
std::vector<RenderTargetVk> mCubeMapRenderTargets;
// The serial is used for cache indexing.
Serial mSerial;
};
} // namespace rx
......
......@@ -1436,6 +1436,52 @@ void PipelineHelper::addTransition(GraphicsPipelineTransitionBits bits,
{
mTransitions.emplace_back(bits, desc, pipeline);
}
// Starts empty: no active entries and every serial slot zeroed.
TextureDescriptorDesc::TextureDescriptorDesc()
{
    mMaxIndex = 0;
    mSerials.fill(0);
}
// The compiler-generated destructor, copy constructor and copy assignment are correct: the
// class holds only a uint32_t and an array of uint32_t serials.
TextureDescriptorDesc::~TextureDescriptorDesc() = default;
TextureDescriptorDesc::TextureDescriptorDesc(const TextureDescriptorDesc &other) = default;
TextureDescriptorDesc &TextureDescriptorDesc::operator=(const TextureDescriptorDesc &other) =
    default;
// Records the serial of the texture bound at |index|, growing the exclusive max index to
// cover the slot.
void TextureDescriptorDesc::update(size_t index, Serial serial)
{
    mMaxIndex = std::max(mMaxIndex, static_cast<uint32_t>(index + 1));

    // If the serial number overflows we should defragment and regenerate all serials.
    // There should never be more than UINT32_MAX textures alive at a time.
    ASSERT(serial.getValue() < std::numeric_limits<uint32_t>::max());
    mSerials[index] = static_cast<uint32_t>(serial.getValue());
}
// Hashes only the serials that are actually in use; mMaxIndex is an exclusive bound.
size_t TextureDescriptorDesc::hash() const
{
    const size_t usedBytes = sizeof(uint32_t) * mMaxIndex;
    return angle::ComputeGenericHash(&mSerials, usedBytes);
}
// Returns the descriptor to its empty state. Only the first mMaxIndex entries can be
// non-zero, so only that prefix needs clearing.
void TextureDescriptorDesc::reset()
{
    std::fill_n(mSerials.data(), mMaxIndex, 0u);
    mMaxIndex = 0;
}
// Two descriptors match when they cover the same number of slots and every used serial is
// identical. An empty range compares equal, preserving the mMaxIndex == 0 fast path.
bool TextureDescriptorDesc::operator==(const TextureDescriptorDesc &other) const
{
    if (mMaxIndex != other.mMaxIndex)
    {
        return false;
    }
    return std::equal(mSerials.data(), mSerials.data() + mMaxIndex, other.mSerials.data());
}
} // namespace vk
// RenderPassCache implementation.
......
......@@ -684,6 +684,28 @@ class PipelineHelper final : angle::NonCopyable
ANGLE_INLINE PipelineHelper::PipelineHelper(Pipeline &&pipeline) : mPipeline(std::move(pipeline)) {}
// Cache key describing the textures bound for one descriptor set. Each active texture
// contributes a 32-bit serial (one per VkImage/VkSampler combination), and mMaxIndex bounds
// how many entries are in use so hashing and comparison only touch the active prefix.
class TextureDescriptorDesc
{
  public:
    TextureDescriptorDesc();
    ~TextureDescriptorDesc();
    TextureDescriptorDesc(const TextureDescriptorDesc &other);
    TextureDescriptorDesc &operator=(const TextureDescriptorDesc &other);

    // Records |serial| for the texture at |index|, growing mMaxIndex if necessary.
    void update(size_t index, Serial serial);
    // Hashes only the first mMaxIndex serials.
    size_t hash() const;
    // Zeroes the used serials and resets mMaxIndex to zero.
    void reset();
    bool operator==(const TextureDescriptorDesc &other) const;

    // Note: this is an exclusive index. If there is one index it will return "1".
    uint32_t getMaxIndex() const { return mMaxIndex; }

  private:
    uint32_t mMaxIndex;
    gl::ActiveTextureArray<uint32_t> mSerials;
};
} // namespace vk
} // namespace rx
......@@ -719,6 +741,12 @@ struct hash<rx::vk::PipelineLayoutDesc>
{
size_t operator()(const rx::vk::PipelineLayoutDesc &key) const { return key.hash(); }
};
// std::hash specialization so TextureDescriptorDesc can be used as the key of an
// std::unordered_map (the per-program texture descriptor set cache).
template <>
struct hash<rx::vk::TextureDescriptorDesc>
{
    size_t operator()(const rx::vk::TextureDescriptorDesc &key) const { return key.hash(); }
};
} // namespace std
namespace rx
......@@ -884,7 +912,6 @@ constexpr uint32_t kReservedDriverUniformBindingCount = 1;
constexpr uint32_t kVertexUniformsBindingIndex = 0;
// Binding index for default uniforms in the fragment shader:
constexpr uint32_t kFragmentUniformsBindingIndex = 1;
} // namespace rx
#endif // LIBANGLE_RENDERER_VULKAN_VK_CACHE_UTILS_H_
......@@ -257,7 +257,7 @@ DynamicBuffer::~DynamicBuffer()
ASSERT(mBuffer == nullptr);
}
angle::Result DynamicBuffer::allocate(ContextVk *context,
angle::Result DynamicBuffer::allocate(ContextVk *contextVk,
size_t sizeInBytes,
uint8_t **ptrOut,
VkBuffer *bufferOut,
......@@ -273,8 +273,8 @@ angle::Result DynamicBuffer::allocate(ContextVk *context,
{
if (mBuffer)
{
ANGLE_TRY(flush(context));
mBuffer->unmap(context->getDevice());
ANGLE_TRY(flush(contextVk));
mBuffer->unmap(contextVk->getDevice());
mRetainedBuffers.push_back(mBuffer);
mBuffer = nullptr;
......@@ -296,7 +296,7 @@ angle::Result DynamicBuffer::allocate(ContextVk *context,
const VkMemoryPropertyFlags memoryProperty = mHostVisible
? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
: VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
ANGLE_TRY(buffer->init(context, createInfo, memoryProperty));
ANGLE_TRY(buffer->init(contextVk, createInfo, memoryProperty));
mBuffer = buffer.release();
mNextAllocationOffset = 0;
......@@ -324,7 +324,7 @@ angle::Result DynamicBuffer::allocate(ContextVk *context,
{
ASSERT(mHostVisible);
uint8_t *mappedMemory;
ANGLE_TRY(mBuffer->map(context, &mappedMemory));
ANGLE_TRY(mBuffer->map(contextVk, &mappedMemory));
*ptrOut = mappedMemory + mNextAllocationOffset;
}
......@@ -333,44 +333,44 @@ angle::Result DynamicBuffer::allocate(ContextVk *context,
return angle::Result::Continue;
}
angle::Result DynamicBuffer::flush(ContextVk *context)
angle::Result DynamicBuffer::flush(ContextVk *contextVk)
{
if (mHostVisible && (mNextAllocationOffset > mLastFlushOrInvalidateOffset))
{
ASSERT(mBuffer != nullptr);
ANGLE_TRY(mBuffer->flush(context, mLastFlushOrInvalidateOffset,
ANGLE_TRY(mBuffer->flush(contextVk, mLastFlushOrInvalidateOffset,
mNextAllocationOffset - mLastFlushOrInvalidateOffset));
mLastFlushOrInvalidateOffset = mNextAllocationOffset;
}
return angle::Result::Continue;
}
angle::Result DynamicBuffer::invalidate(ContextVk *context)
angle::Result DynamicBuffer::invalidate(ContextVk *contextVk)
{
if (mHostVisible && (mNextAllocationOffset > mLastFlushOrInvalidateOffset))
{
ASSERT(mBuffer != nullptr);
ANGLE_TRY(mBuffer->invalidate(context, mLastFlushOrInvalidateOffset,
ANGLE_TRY(mBuffer->invalidate(contextVk, mLastFlushOrInvalidateOffset,
mNextAllocationOffset - mLastFlushOrInvalidateOffset));
mLastFlushOrInvalidateOffset = mNextAllocationOffset;
}
return angle::Result::Continue;
}
void DynamicBuffer::release(ContextVk *context)
void DynamicBuffer::release(ContextVk *contextVk)
{
reset();
releaseRetainedBuffers(context);
releaseRetainedBuffers(contextVk);
if (mBuffer)
{
mBuffer->unmap(context->getDevice());
mBuffer->unmap(contextVk->getDevice());
// The buffers may not have been recording commands, but they could be used to store data so
// they should live until at most this frame. For example a vertex buffer filled entirely
// by the CPU currently never gets a chance to have its serial set.
mBuffer->updateQueueSerial(context->getCurrentQueueSerial());
mBuffer->release(context);
mBuffer->updateQueueSerial(contextVk->getCurrentQueueSerial());
mBuffer->release(contextVk);
delete mBuffer;
mBuffer = nullptr;
}
......@@ -391,13 +391,13 @@ void DynamicBuffer::release(DisplayVk *display, std::vector<GarbageObjectBase> *
}
}
void DynamicBuffer::releaseRetainedBuffers(ContextVk *context)
void DynamicBuffer::releaseRetainedBuffers(ContextVk *contextVk)
{
for (BufferHelper *toFree : mRetainedBuffers)
{
// See note in release().
toFree->updateQueueSerial(context->getCurrentQueueSerial());
toFree->release(context);
toFree->updateQueueSerial(contextVk->getCurrentQueueSerial());
toFree->release(contextVk);
delete toFree;
}
......@@ -517,7 +517,7 @@ void DescriptorPoolHelper::release(ContextVk *contextVk)
contextVk->releaseObject(contextVk->getCurrentQueueSerial(), &mDescriptorPool);
}
angle::Result DescriptorPoolHelper::allocateSets(ContextVk *context,
angle::Result DescriptorPoolHelper::allocateSets(ContextVk *contextVk,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
VkDescriptorSet *descriptorSetsOut)
......@@ -531,8 +531,8 @@ angle::Result DescriptorPoolHelper::allocateSets(ContextVk *context,
ASSERT(mFreeDescriptorSets >= descriptorSetCount);
mFreeDescriptorSets -= descriptorSetCount;
ANGLE_VK_TRY(context, mDescriptorPool.allocateDescriptorSets(context->getDevice(), allocInfo,
descriptorSetsOut));
ANGLE_VK_TRY(contextVk, mDescriptorPool.allocateDescriptorSets(contextVk->getDevice(),
allocInfo, descriptorSetsOut));
return angle::Result::Continue;
}
......@@ -543,7 +543,7 @@ DynamicDescriptorPool::DynamicDescriptorPool()
DynamicDescriptorPool::~DynamicDescriptorPool() = default;
angle::Result DynamicDescriptorPool::init(ContextVk *context,
angle::Result DynamicDescriptorPool::init(ContextVk *contextVk,
const VkDescriptorPoolSize *setSizes,
uint32_t setSizeCount)
{
......@@ -558,7 +558,7 @@ angle::Result DynamicDescriptorPool::init(ContextVk *context,
}
mDescriptorPools.push_back(new RefCountedDescriptorPoolHelper());
return mDescriptorPools[0]->get().init(context, mPoolSizes, mMaxSetsPerPool);
return mDescriptorPools[0]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
}
void DynamicDescriptorPool::destroy(VkDevice device)
......@@ -585,17 +585,22 @@ void DynamicDescriptorPool::release(ContextVk *contextVk)
mDescriptorPools.clear();
}
angle::Result DynamicDescriptorPool::allocateSets(ContextVk *context,
angle::Result DynamicDescriptorPool::allocateSetsAndGetInfo(
ContextVk *contextVk,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
RefCountedDescriptorPoolBinding *bindingOut,
VkDescriptorSet *descriptorSetsOut)
VkDescriptorSet *descriptorSetsOut,
bool *newPoolAllocatedOut)
{
*newPoolAllocatedOut = false;
if (!bindingOut->valid() || !bindingOut->get().hasCapacity(descriptorSetCount))
{
if (!mDescriptorPools[mCurrentPoolIndex]->get().hasCapacity(descriptorSetCount))
{
ANGLE_TRY(allocateNewPool(context));
ANGLE_TRY(allocateNewPool(contextVk));
*newPoolAllocatedOut = true;
}
// Make sure the old binding knows the descriptor sets can still be in-use. We only need
......@@ -603,25 +608,25 @@ angle::Result DynamicDescriptorPool::allocateSets(ContextVk *context,
// when we move to a new pool.
if (bindingOut->valid())
{
Serial currentSerial = context->getCurrentQueueSerial();
Serial currentSerial = contextVk->getCurrentQueueSerial();
bindingOut->get().updateSerial(currentSerial);
}
bindingOut->set(mDescriptorPools[mCurrentPoolIndex]);
}
return bindingOut->get().allocateSets(context, descriptorSetLayout, descriptorSetCount,
return bindingOut->get().allocateSets(contextVk, descriptorSetLayout, descriptorSetCount,
descriptorSetsOut);
}
angle::Result DynamicDescriptorPool::allocateNewPool(ContextVk *context)
angle::Result DynamicDescriptorPool::allocateNewPool(ContextVk *contextVk)
{
bool found = false;
for (size_t poolIndex = 0; poolIndex < mDescriptorPools.size(); ++poolIndex)
{
if (!mDescriptorPools[poolIndex]->isReferenced() &&
!context->isSerialInUse(mDescriptorPools[poolIndex]->get().getSerial()))
!contextVk->isSerialInUse(mDescriptorPools[poolIndex]->get().getSerial()))
{
mCurrentPoolIndex = poolIndex;
found = true;
......@@ -635,10 +640,10 @@ angle::Result DynamicDescriptorPool::allocateNewPool(ContextVk *context)
mCurrentPoolIndex = mDescriptorPools.size() - 1;
static constexpr size_t kMaxPools = 99999;
ANGLE_VK_CHECK(context, mDescriptorPools.size() < kMaxPools, VK_ERROR_TOO_MANY_OBJECTS);
ANGLE_VK_CHECK(contextVk, mDescriptorPools.size() < kMaxPools, VK_ERROR_TOO_MANY_OBJECTS);
}
return mDescriptorPools[mCurrentPoolIndex]->get().init(context, mPoolSizes, mMaxSetsPerPool);
return mDescriptorPools[mCurrentPoolIndex]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
}
void DynamicDescriptorPool::setMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool)
......@@ -656,7 +661,7 @@ template <typename Pool>
DynamicallyGrowingPool<Pool>::~DynamicallyGrowingPool() = default;
template <typename Pool>
angle::Result DynamicallyGrowingPool<Pool>::initEntryPool(Context *context, uint32_t poolSize)
angle::Result DynamicallyGrowingPool<Pool>::initEntryPool(Context *contextVk, uint32_t poolSize)
{
ASSERT(mPools.empty() && mPoolStats.empty());
mPoolSize = poolSize;
......@@ -671,9 +676,9 @@ void DynamicallyGrowingPool<Pool>::destroyEntryPool()
}
template <typename Pool>
bool DynamicallyGrowingPool<Pool>::findFreeEntryPool(ContextVk *context)
bool DynamicallyGrowingPool<Pool>::findFreeEntryPool(ContextVk *contextVk)
{
Serial lastCompletedQueueSerial = context->getLastCompletedQueueSerial();
Serial lastCompletedQueueSerial = contextVk->getLastCompletedQueueSerial();
for (size_t i = 0; i < mPools.size(); ++i)
{
if (mPoolStats[i].freedCount == mPoolSize &&
......@@ -692,7 +697,7 @@ bool DynamicallyGrowingPool<Pool>::findFreeEntryPool(ContextVk *context)
}
template <typename Pool>
angle::Result DynamicallyGrowingPool<Pool>::allocateNewEntryPool(ContextVk *context, Pool &&pool)
angle::Result DynamicallyGrowingPool<Pool>::allocateNewEntryPool(ContextVk *contextVk, Pool &&pool)
{
mPools.push_back(std::move(pool));
......@@ -706,12 +711,12 @@ angle::Result DynamicallyGrowingPool<Pool>::allocateNewEntryPool(ContextVk *cont
}
template <typename Pool>
void DynamicallyGrowingPool<Pool>::onEntryFreed(ContextVk *context, size_t poolIndex)
void DynamicallyGrowingPool<Pool>::onEntryFreed(ContextVk *contextVk, size_t poolIndex)
{
ASSERT(poolIndex < mPoolStats.size() && mPoolStats[poolIndex].freedCount < mPoolSize);
// Take note of the current serial to avoid reallocating a query in the same pool
mPoolStats[poolIndex].serial = context->getCurrentQueueSerial();
mPoolStats[poolIndex].serial = contextVk->getCurrentQueueSerial();
++mPoolStats[poolIndex].freedCount;
}
......@@ -720,12 +725,12 @@ DynamicQueryPool::DynamicQueryPool() = default;
DynamicQueryPool::~DynamicQueryPool() = default;
angle::Result DynamicQueryPool::init(ContextVk *context, VkQueryType type, uint32_t poolSize)
angle::Result DynamicQueryPool::init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize)
{
ANGLE_TRY(initEntryPool(context, poolSize));
ANGLE_TRY(initEntryPool(contextVk, poolSize));
mQueryType = type;
ANGLE_TRY(allocateNewPool(context));
ANGLE_TRY(allocateNewPool(contextVk));
return angle::Result::Continue;
}
......@@ -740,40 +745,40 @@ void DynamicQueryPool::destroy(VkDevice device)
destroyEntryPool();
}
angle::Result DynamicQueryPool::allocateQuery(ContextVk *context, QueryHelper *queryOut)
angle::Result DynamicQueryPool::allocateQuery(ContextVk *contextVk, QueryHelper *queryOut)
{
ASSERT(!queryOut->getQueryPool());
size_t poolIndex = 0;
uint32_t queryIndex = 0;
ANGLE_TRY(allocateQuery(context, &poolIndex, &queryIndex));
ANGLE_TRY(allocateQuery(contextVk, &poolIndex, &queryIndex));
queryOut->init(this, poolIndex, queryIndex);
return angle::Result::Continue;
}
void DynamicQueryPool::freeQuery(ContextVk *context, QueryHelper *query)
void DynamicQueryPool::freeQuery(ContextVk *contextVk, QueryHelper *query)
{
if (query->getQueryPool())
{
size_t poolIndex = query->getQueryPoolIndex();
ASSERT(query->getQueryPool()->valid());
freeQuery(context, poolIndex, query->getQuery());
freeQuery(contextVk, poolIndex, query->getQuery());
query->deinit();
}
}
angle::Result DynamicQueryPool::allocateQuery(ContextVk *context,
angle::Result DynamicQueryPool::allocateQuery(ContextVk *contextVk,
size_t *poolIndex,
uint32_t *queryIndex)
{
if (mCurrentFreeEntry >= mPoolSize)
{
// No more queries left in this pool, create another one.
ANGLE_TRY(allocateNewPool(context));
ANGLE_TRY(allocateNewPool(contextVk));
}
*poolIndex = mCurrentPool;
......@@ -782,15 +787,15 @@ angle::Result DynamicQueryPool::allocateQuery(ContextVk *context,
return angle::Result::Continue;
}
void DynamicQueryPool::freeQuery(ContextVk *context, size_t poolIndex, uint32_t queryIndex)
void DynamicQueryPool::freeQuery(ContextVk *contextVk, size_t poolIndex, uint32_t queryIndex)
{
ANGLE_UNUSED_VARIABLE(queryIndex);
onEntryFreed(context, poolIndex);
onEntryFreed(contextVk, poolIndex);
}
angle::Result DynamicQueryPool::allocateNewPool(ContextVk *context)
angle::Result DynamicQueryPool::allocateNewPool(ContextVk *contextVk)
{
if (findFreeEntryPool(context))
if (findFreeEntryPool(contextVk))
{
return angle::Result::Continue;
}
......@@ -804,9 +809,9 @@ angle::Result DynamicQueryPool::allocateNewPool(ContextVk *context)
vk::QueryPool queryPool;
ANGLE_VK_TRY(context, queryPool.init(context->getDevice(), queryPoolInfo));
ANGLE_VK_TRY(contextVk, queryPool.init(contextVk->getDevice(), queryPoolInfo));
return allocateNewEntryPool(context, std::move(queryPool));
return allocateNewEntryPool(contextVk, std::move(queryPool));
}
// QueryHelper implementation
......@@ -830,29 +835,29 @@ void QueryHelper::deinit()
mQuery = 0;
}
void QueryHelper::beginQuery(ContextVk *context)
void QueryHelper::beginQuery(ContextVk *contextVk)
{
context->getCommandGraph()->beginQuery(getQueryPool(), getQuery());
mMostRecentSerial = context->getCurrentQueueSerial();
contextVk->getCommandGraph()->beginQuery(getQueryPool(), getQuery());
mMostRecentSerial = contextVk->getCurrentQueueSerial();
}
void QueryHelper::endQuery(ContextVk *context)
void QueryHelper::endQuery(ContextVk *contextVk)
{
context->getCommandGraph()->endQuery(getQueryPool(), getQuery());
mMostRecentSerial = context->getCurrentQueueSerial();
contextVk->getCommandGraph()->endQuery(getQueryPool(), getQuery());
mMostRecentSerial = contextVk->getCurrentQueueSerial();
}
void QueryHelper::writeTimestamp(ContextVk *context)
void QueryHelper::writeTimestamp(ContextVk *contextVk)
{
context->getCommandGraph()->writeTimestamp(getQueryPool(), getQuery());
mMostRecentSerial = context->getCurrentQueueSerial();
contextVk->getCommandGraph()->writeTimestamp(getQueryPool(), getQuery());
mMostRecentSerial = contextVk->getCurrentQueueSerial();
}
bool QueryHelper::hasPendingWork(ContextVk *context)
bool QueryHelper::hasPendingWork(ContextVk *contextVk)
{
// If the renderer has a queue serial higher than the stored one, the command buffers that
// recorded this query have already been submitted, so there is no pending work.
return mMostRecentSerial == context->getCurrentQueueSerial();
return mMostRecentSerial == contextVk->getCurrentQueueSerial();
}
// DynamicSemaphorePool implementation
......@@ -860,10 +865,10 @@ DynamicSemaphorePool::DynamicSemaphorePool() = default;
DynamicSemaphorePool::~DynamicSemaphorePool() = default;
angle::Result DynamicSemaphorePool::init(ContextVk *context, uint32_t poolSize)
angle::Result DynamicSemaphorePool::init(ContextVk *contextVk, uint32_t poolSize)
{
ANGLE_TRY(initEntryPool(context, poolSize));
ANGLE_TRY(allocateNewPool(context));
ANGLE_TRY(initEntryPool(contextVk, poolSize));
ANGLE_TRY(allocateNewPool(contextVk));
return angle::Result::Continue;
}
......@@ -880,7 +885,7 @@ void DynamicSemaphorePool::destroy(VkDevice device)
destroyEntryPool();
}
angle::Result DynamicSemaphorePool::allocateSemaphore(ContextVk *context,
angle::Result DynamicSemaphorePool::allocateSemaphore(ContextVk *contextVk,
SemaphoreHelper *semaphoreOut)
{
ASSERT(!semaphoreOut->getSemaphore());
......@@ -888,7 +893,7 @@ angle::Result DynamicSemaphorePool::allocateSemaphore(ContextVk *context,
if (mCurrentFreeEntry >= mPoolSize)
{
// No more queries left in this pool, create another one.
ANGLE_TRY(allocateNewPool(context));
ANGLE_TRY(allocateNewPool(contextVk));
}
semaphoreOut->init(mCurrentPool, &mPools[mCurrentPool][mCurrentFreeEntry++]);
......@@ -896,18 +901,18 @@ angle::Result DynamicSemaphorePool::allocateSemaphore(ContextVk *context,
return angle::Result::Continue;
}
void DynamicSemaphorePool::freeSemaphore(ContextVk *context, SemaphoreHelper *semaphore)
void DynamicSemaphorePool::freeSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphore)
{
if (semaphore->getSemaphore())
{
onEntryFreed(context, semaphore->getSemaphorePoolIndex());
onEntryFreed(contextVk, semaphore->getSemaphorePoolIndex());
semaphore->deinit();
}
}
angle::Result DynamicSemaphorePool::allocateNewPool(ContextVk *context)
angle::Result DynamicSemaphorePool::allocateNewPool(ContextVk *contextVk)
{
if (findFreeEntryPool(context))
if (findFreeEntryPool(contextVk))
{
return angle::Result::Continue;
}
......@@ -916,14 +921,14 @@ angle::Result DynamicSemaphorePool::allocateNewPool(ContextVk *context)
for (Semaphore &semaphore : newPool)
{
ANGLE_VK_TRY(context, semaphore.init(context->getDevice()));
ANGLE_VK_TRY(contextVk, semaphore.init(contextVk->getDevice()));
}
// This code is safe as long as the growth of the outer vector in vector<vector<T>> is done by
// moving the inner vectors, making sure references to the inner vector remain intact.
Semaphore *assertMove = mPools.size() > 0 ? mPools[0].data() : nullptr;
ANGLE_TRY(allocateNewEntryPool(context, std::move(newPool)));
ANGLE_TRY(allocateNewEntryPool(contextVk, std::move(newPool)));
ASSERT(assertMove == nullptr || assertMove == mPools[0].data());
......@@ -1097,9 +1102,9 @@ angle::Result LineLoopHelper::streamIndices(ContextVk *contextVk,
return angle::Result::Continue;
}
void LineLoopHelper::release(ContextVk *context)
void LineLoopHelper::release(ContextVk *contextVk)
{
mDynamicIndexBuffer.release(context);
mDynamicIndexBuffer.release(contextVk);
}
void LineLoopHelper::destroy(VkDevice device)
......@@ -1128,13 +1133,13 @@ BufferHelper::BufferHelper()
BufferHelper::~BufferHelper() = default;
angle::Result BufferHelper::init(ContextVk *context,
angle::Result BufferHelper::init(ContextVk *contextVk,
const VkBufferCreateInfo &createInfo,
VkMemoryPropertyFlags memoryPropertyFlags)
{
mSize = createInfo.size;
ANGLE_VK_TRY(context, mBuffer.init(context->getDevice(), createInfo));
return vk::AllocateBufferMemory(context, memoryPropertyFlags, &mMemoryPropertyFlags, nullptr,
ANGLE_VK_TRY(contextVk, mBuffer.init(contextVk->getDevice(), createInfo));
return vk::AllocateBufferMemory(contextVk, memoryPropertyFlags, &mMemoryPropertyFlags, nullptr,
&mBuffer, &mDeviceMemory);
}
......@@ -1149,15 +1154,15 @@ void BufferHelper::destroy(VkDevice device)
mDeviceMemory.destroy(device);
}
void BufferHelper::release(ContextVk *context)
void BufferHelper::release(ContextVk *contextVk)
{
unmap(context->getDevice());
unmap(contextVk->getDevice());
mSize = 0;
mViewFormat = nullptr;
context->releaseObject(getStoredQueueSerial(), &mBuffer);
context->releaseObject(getStoredQueueSerial(), &mBufferView);
context->releaseObject(getStoredQueueSerial(), &mDeviceMemory);
contextVk->releaseObject(getStoredQueueSerial(), &mBuffer);
contextVk->releaseObject(getStoredQueueSerial(), &mBufferView);
contextVk->releaseObject(getStoredQueueSerial(), &mDeviceMemory);
}
void BufferHelper::release(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue)
......@@ -1182,14 +1187,14 @@ void BufferHelper::onWrite(VkAccessFlags writeAccessType)
mCurrentReadAccess = 0;
}
angle::Result BufferHelper::copyFromBuffer(ContextVk *context,
angle::Result BufferHelper::copyFromBuffer(ContextVk *contextVk,
const Buffer &buffer,
VkAccessFlags bufferAccessType,
const VkBufferCopy &copyRegion)
{
// 'recordCommands' will implicitly stop any reads from using the old buffer data.
vk::CommandBuffer *commandBuffer = nullptr;
ANGLE_TRY(recordCommands(context, &commandBuffer));
ANGLE_TRY(recordCommands(contextVk, &commandBuffer));
if (mCurrentReadAccess != 0 || mCurrentWriteAccess != 0 || bufferAccessType != 0)
{
......@@ -1213,7 +1218,7 @@ angle::Result BufferHelper::copyFromBuffer(ContextVk *context,
return angle::Result::Continue;
}
angle::Result BufferHelper::initBufferView(ContextVk *context, const Format &format)
angle::Result BufferHelper::initBufferView(ContextVk *contextVk, const Format &format)
{
ASSERT(format.valid());
......@@ -1230,15 +1235,15 @@ angle::Result BufferHelper::initBufferView(ContextVk *context, const Format &for
viewCreateInfo.offset = 0;
viewCreateInfo.range = mSize;
ANGLE_VK_TRY(context, mBufferView.init(context->getDevice(), viewCreateInfo));
ANGLE_VK_TRY(contextVk, mBufferView.init(contextVk->getDevice(), viewCreateInfo));
mViewFormat = &format;
return angle::Result::Continue;
}
angle::Result BufferHelper::mapImpl(ContextVk *context)
angle::Result BufferHelper::mapImpl(ContextVk *contextVk)
{
ANGLE_VK_TRY(context, mDeviceMemory.map(context->getDevice(), 0, mSize, 0, &mMappedMemory));
ANGLE_VK_TRY(contextVk, mDeviceMemory.map(contextVk->getDevice(), 0, mSize, 0, &mMappedMemory));
return angle::Result::Continue;
}
......@@ -1251,7 +1256,7 @@ void BufferHelper::unmap(VkDevice device)
}
}
angle::Result BufferHelper::flush(ContextVk *context, size_t offset, size_t size)
angle::Result BufferHelper::flush(ContextVk *contextVk, size_t offset, size_t size)
{
bool hostVisible = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
bool hostCoherent = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
......@@ -1262,12 +1267,12 @@ angle::Result BufferHelper::flush(ContextVk *context, size_t offset, size_t size
range.memory = mDeviceMemory.getHandle();
range.offset = offset;
range.size = size;
ANGLE_VK_TRY(context, vkFlushMappedMemoryRanges(context->getDevice(), 1, &range));
ANGLE_VK_TRY(contextVk, vkFlushMappedMemoryRanges(contextVk->getDevice(), 1, &range));
}
return angle::Result::Continue;
}
angle::Result BufferHelper::invalidate(ContextVk *context, size_t offset, size_t size)
angle::Result BufferHelper::invalidate(ContextVk *contextVk, size_t offset, size_t size)
{
bool hostVisible = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
bool hostCoherent = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
......@@ -1278,7 +1283,7 @@ angle::Result BufferHelper::invalidate(ContextVk *context, size_t offset, size_t
range.memory = mDeviceMemory.getHandle();
range.offset = offset;
range.size = size;
ANGLE_VK_TRY(context, vkInvalidateMappedMemoryRanges(context->getDevice(), 1, &range));
ANGLE_VK_TRY(contextVk, vkInvalidateMappedMemoryRanges(contextVk->getDevice(), 1, &range));
}
return angle::Result::Continue;
}
......@@ -1397,10 +1402,10 @@ angle::Result ImageHelper::initExternal(Context *context,
return angle::Result::Continue;
}
void ImageHelper::releaseImage(ContextVk *context)
void ImageHelper::releaseImage(ContextVk *contextVk)
{
context->releaseObject(getStoredQueueSerial(), &mImage);
context->releaseObject(getStoredQueueSerial(), &mDeviceMemory);
contextVk->releaseObject(getStoredQueueSerial(), &mImage);
contextVk->releaseObject(getStoredQueueSerial(), &mDeviceMemory);
}
void ImageHelper::releaseImage(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue)
......@@ -1409,14 +1414,14 @@ void ImageHelper::releaseImage(DisplayVk *display, std::vector<GarbageObjectBase
mDeviceMemory.dumpResources(garbageQueue);
}
void ImageHelper::releaseStagingBuffer(ContextVk *context)
void ImageHelper::releaseStagingBuffer(ContextVk *contextVk)
{
// Remove updates that never made it to the texture.
for (SubresourceUpdate &update : mSubresourceUpdates)
{
update.release(context);
update.release(contextVk);
}
mStagingBuffer.release(context);
mStagingBuffer.release(contextVk);
mSubresourceUpdates.clear();
}
......@@ -1866,7 +1871,7 @@ void ImageHelper::resolve(ImageHelper *dest,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
}
void ImageHelper::removeStagedUpdates(ContextVk *context, const gl::ImageIndex &index)
void ImageHelper::removeStagedUpdates(ContextVk *contextVk, const gl::ImageIndex &index)
{
// Find any staged updates for this index and removes them from the pending list.
uint32_t levelIndex = index.getLevelIndex();
......@@ -1877,7 +1882,7 @@ void ImageHelper::removeStagedUpdates(ContextVk *context, const gl::ImageIndex &
auto update = mSubresourceUpdates.begin() + index;
if (update->isUpdateToLayerLevel(layerIndex, levelIndex))
{
update->release(context);
update->release(contextVk);
mSubresourceUpdates.erase(update);
}
else
......@@ -2203,7 +2208,7 @@ angle::Result ImageHelper::allocateStagingMemory(ContextVk *contextVk,
newBufferAllocatedOut);
}
angle::Result ImageHelper::flushStagedUpdates(ContextVk *context,
angle::Result ImageHelper::flushStagedUpdates(ContextVk *contextVk,
uint32_t levelStart,
uint32_t levelEnd,
uint32_t layerStart,
......@@ -2215,7 +2220,7 @@ angle::Result ImageHelper::flushStagedUpdates(ContextVk *context,
return angle::Result::Continue;
}
ANGLE_TRY(mStagingBuffer.flush(context));
ANGLE_TRY(mStagingBuffer.flush(contextVk));
std::vector<SubresourceUpdate> updatesToKeep;
const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(mFormat->imageFormat());
......@@ -2289,7 +2294,7 @@ angle::Result ImageHelper::flushStagedUpdates(ContextVk *context,
getCurrentLayout(), 1, &update.image.copyRegion);
}
update.release(context);
update.release(contextVk);
}
// Only remove the updates that were actually applied to the image.
......@@ -2297,18 +2302,18 @@ angle::Result ImageHelper::flushStagedUpdates(ContextVk *context,
if (mSubresourceUpdates.empty())
{
mStagingBuffer.releaseRetainedBuffers(context);
mStagingBuffer.releaseRetainedBuffers(contextVk);
}
return angle::Result::Continue;
}
angle::Result ImageHelper::flushAllStagedUpdates(ContextVk *context)
angle::Result ImageHelper::flushAllStagedUpdates(ContextVk *contextVk)
{
// Clear the image.
vk::CommandBuffer *commandBuffer = nullptr;
ANGLE_TRY(recordCommands(context, &commandBuffer));
return flushStagedUpdates(context, 0, mLevelCount, 0, mLayerCount, commandBuffer);
ANGLE_TRY(recordCommands(contextVk, &commandBuffer));
return flushStagedUpdates(contextVk, 0, mLevelCount, 0, mLayerCount, commandBuffer);
}
// ImageHelper::SubresourceUpdate implementation
......@@ -2353,12 +2358,12 @@ ImageHelper::SubresourceUpdate::SubresourceUpdate(const SubresourceUpdate &other
}
}
void ImageHelper::SubresourceUpdate::release(ContextVk *context)
void ImageHelper::SubresourceUpdate::release(ContextVk *contextVk)
{
if (updateSource == UpdateSource::Image)
{
image.image->releaseImage(context);
image.image->releaseStagingBuffer(context);
image.image->releaseImage(contextVk);
image.image->releaseStagingBuffer(contextVk);
SafeDelete(image.image);
}
}
......
......@@ -50,7 +50,7 @@ class DynamicBuffer : angle::NonCopyable
// a new buffer to be created (which is returned in the optional parameter
// `newBufferAllocatedOut`). The new region will be in the returned buffer at given offset. If
// a memory pointer is given, the buffer will be automatically map()ed.
angle::Result allocate(ContextVk *context,
angle::Result allocate(ContextVk *contextVk,
size_t sizeInBytes,
uint8_t **ptrOut,
VkBuffer *bufferOut,
......@@ -58,17 +58,17 @@ class DynamicBuffer : angle::NonCopyable
bool *newBufferAllocatedOut);
// After a sequence of writes, call flush to ensure the data is visible to the device.
angle::Result flush(ContextVk *context);
angle::Result flush(ContextVk *contextVk);
// After a sequence of writes, call invalidate to ensure the data is visible to the host.
angle::Result invalidate(ContextVk *context);
angle::Result invalidate(ContextVk *contextVk);
// This releases resources when they might currently be in use.
void release(ContextVk *context);
void release(ContextVk *contextVk);
void release(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue);
// This releases all the buffers that have been allocated since this was last called.
void releaseRetainedBuffers(ContextVk *context);
void releaseRetainedBuffers(ContextVk *contextVk);
void releaseRetainedBuffers(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue);
// This frees resources immediately.
......@@ -121,7 +121,7 @@ class DescriptorPoolHelper
void destroy(VkDevice device);
void release(ContextVk *contextVk);
angle::Result allocateSets(ContextVk *context,
angle::Result allocateSets(ContextVk *contextVk,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
VkDescriptorSet *descriptorSetsOut);
......@@ -148,7 +148,7 @@ class DynamicDescriptorPool final : angle::NonCopyable
// The DynamicDescriptorPool only handles one pool size at this time.
// Note that setSizes[i].descriptorCount is expected to be the number of descriptors in
// an individual set. The pool size will be calculated accordingly.
angle::Result init(ContextVk *context,
angle::Result init(ContextVk *contextVk,
const VkDescriptorPoolSize *setSizes,
uint32_t setSizeCount);
void destroy(VkDevice device);
......@@ -156,17 +156,31 @@ class DynamicDescriptorPool final : angle::NonCopyable
// We use the descriptor type to help count the number of free sets.
// By convention, sets are indexed according to the constants in vk_cache_utils.h.
angle::Result allocateSets(ContextVk *context,
ANGLE_INLINE angle::Result allocateSets(ContextVk *contextVk,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
RefCountedDescriptorPoolBinding *bindingOut,
VkDescriptorSet *descriptorSetsOut);
VkDescriptorSet *descriptorSetsOut)
{
bool ignoreNewPoolAllocated;
return allocateSetsAndGetInfo(contextVk, descriptorSetLayout, descriptorSetCount,
bindingOut, descriptorSetsOut, &ignoreNewPoolAllocated);
}
// We use the descriptor type to help count the number of free sets.
// By convention, sets are indexed according to the constants in vk_cache_utils.h.
angle::Result allocateSetsAndGetInfo(ContextVk *contextVk,
const VkDescriptorSetLayout *descriptorSetLayout,
uint32_t descriptorSetCount,
RefCountedDescriptorPoolBinding *bindingOut,
VkDescriptorSet *descriptorSetsOut,
bool *newPoolAllocatedOut);
// For testing only!
void setMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool);
private:
angle::Result allocateNewPool(ContextVk *context);
angle::Result allocateNewPool(ContextVk *contextVk);
uint32_t mMaxSetsPerPool;
size_t mCurrentPoolIndex;
......@@ -184,18 +198,18 @@ class DynamicallyGrowingPool : angle::NonCopyable
bool isValid() { return mPoolSize > 0; }
protected:
angle::Result initEntryPool(Context *context, uint32_t poolSize);
angle::Result initEntryPool(Context *contextVk, uint32_t poolSize);
void destroyEntryPool();
// Checks to see if any pool is already free, in which case it sets it as current pool and
// returns true.
bool findFreeEntryPool(ContextVk *context);
bool findFreeEntryPool(ContextVk *contextVk);
// Allocates a new entry and initializes it with the given pool.
angle::Result allocateNewEntryPool(ContextVk *context, Pool &&pool);
angle::Result allocateNewEntryPool(ContextVk *contextVk, Pool &&pool);
// Called by the implementation whenever an entry is freed.
void onEntryFreed(ContextVk *context, size_t poolIndex);
void onEntryFreed(ContextVk *contextVk, size_t poolIndex);
// The pool size, to know when a pool is completely freed.
uint32_t mPoolSize;
......@@ -237,21 +251,21 @@ class DynamicQueryPool final : public DynamicallyGrowingPool<QueryPool>
DynamicQueryPool();
~DynamicQueryPool() override;
angle::Result init(ContextVk *context, VkQueryType type, uint32_t poolSize);
angle::Result init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize);
void destroy(VkDevice device);
angle::Result allocateQuery(ContextVk *context, QueryHelper *queryOut);
void freeQuery(ContextVk *context, QueryHelper *query);
angle::Result allocateQuery(ContextVk *contextVk, QueryHelper *queryOut);
void freeQuery(ContextVk *contextVk, QueryHelper *query);
// Special allocator that doesn't work with QueryHelper, which is a CommandGraphResource.
// Currently only used with RendererVk::GpuEventQuery.
angle::Result allocateQuery(ContextVk *context, size_t *poolIndex, uint32_t *queryIndex);
void freeQuery(ContextVk *context, size_t poolIndex, uint32_t queryIndex);
angle::Result allocateQuery(ContextVk *contextVk, size_t *poolIndex, uint32_t *queryIndex);
void freeQuery(ContextVk *contextVk, size_t poolIndex, uint32_t queryIndex);
const QueryPool *getQueryPool(size_t index) const { return &mPools[index]; }
private:
angle::Result allocateNewPool(ContextVk *context);
angle::Result allocateNewPool(ContextVk *contextVk);
// Information required to create new query pools
VkQueryType mQueryType;
......@@ -286,12 +300,12 @@ class QueryHelper final
// Used only by DynamicQueryPool.
size_t getQueryPoolIndex() const { return mQueryPoolIndex; }
void beginQuery(ContextVk *context);
void endQuery(ContextVk *context);
void writeTimestamp(ContextVk *context);
void beginQuery(ContextVk *contextVk);
void endQuery(ContextVk *contextVk);
void writeTimestamp(ContextVk *contextVk);
Serial getStoredQueueSerial() { return mMostRecentSerial; }
bool hasPendingWork(ContextVk *renderer);
bool hasPendingWork(ContextVk *contextVk);
private:
const DynamicQueryPool *mDynamicQueryPool;
......@@ -315,18 +329,18 @@ class DynamicSemaphorePool final : public DynamicallyGrowingPool<std::vector<Sem
DynamicSemaphorePool();
~DynamicSemaphorePool() override;
angle::Result init(ContextVk *context, uint32_t poolSize);
angle::Result init(ContextVk *contextVk, uint32_t poolSize);
void destroy(VkDevice device);
bool isValid() { return mPoolSize > 0; }
// autoFree can be used to allocate a semaphore that's expected to be freed at the end of the
// frame. This renders freeSemaphore unnecessary and saves an eventual search.
angle::Result allocateSemaphore(ContextVk *context, SemaphoreHelper *semaphoreOut);
void freeSemaphore(ContextVk *context, SemaphoreHelper *semaphore);
angle::Result allocateSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphoreOut);
void freeSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphore);
private:
angle::Result allocateNewPool(ContextVk *context);
angle::Result allocateNewPool(ContextVk *contextVk);
};
// Semaphores that are allocated from the semaphore pool are encapsulated in a helper object,
......@@ -387,7 +401,7 @@ class LineLoopHelper final : angle::NonCopyable
vk::BufferHelper **bufferOut,
VkDeviceSize *bufferOffsetOut);
void release(ContextVk *context);
void release(ContextVk *contextVk);
void destroy(VkDevice device);
static void Draw(uint32_t count, vk::CommandBuffer *commandBuffer);
......@@ -404,12 +418,12 @@ class BufferHelper final : public CommandGraphResource
BufferHelper();
~BufferHelper() override;
angle::Result init(ContextVk *context,
angle::Result init(ContextVk *contextVk,
const VkBufferCreateInfo &createInfo,
VkMemoryPropertyFlags memoryPropertyFlags);
void destroy(VkDevice device);
void release(ContextVk *context);
void release(ContextVk *contextVk);
void release(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue);
bool valid() const { return mBuffer.valid(); }
......@@ -432,14 +446,14 @@ class BufferHelper final : public CommandGraphResource
void onWrite(VkAccessFlags writeAccessType);
// Also implicitly sets up the correct barriers.
angle::Result copyFromBuffer(ContextVk *context,
angle::Result copyFromBuffer(ContextVk *contextVk,
const Buffer &buffer,
VkAccessFlags bufferAccessType,
const VkBufferCopy &copyRegion);
// Note: currently only one view is allowed. If needs be, multiple views can be created
// based on format.
angle::Result initBufferView(ContextVk *context, const Format &format);
angle::Result initBufferView(ContextVk *contextVk, const Format &format);
const BufferView &getBufferView() const
{
......@@ -453,11 +467,11 @@ class BufferHelper final : public CommandGraphResource
return *mViewFormat;
}
angle::Result map(ContextVk *context, uint8_t **ptrOut)
angle::Result map(ContextVk *contextVk, uint8_t **ptrOut)
{
if (!mMappedMemory)
{
ANGLE_TRY(mapImpl(context));
ANGLE_TRY(mapImpl(contextVk));
}
*ptrOut = mMappedMemory;
return angle::Result::Continue;
......@@ -465,13 +479,13 @@ class BufferHelper final : public CommandGraphResource
void unmap(VkDevice device);
// After a sequence of writes, call flush to ensure the data is visible to the device.
angle::Result flush(ContextVk *context, size_t offset, size_t size);
angle::Result flush(ContextVk *contextVk, size_t offset, size_t size);
// After a sequence of writes, call invalidate to ensure the data is visible to the host.
angle::Result invalidate(ContextVk *context, size_t offset, size_t size);
angle::Result invalidate(ContextVk *contextVk, size_t offset, size_t size);
private:
angle::Result mapImpl(ContextVk *context);
angle::Result mapImpl(ContextVk *contextVk);
// Vulkan objects.
Buffer mBuffer;
......@@ -600,10 +614,10 @@ class ImageHelper final : public CommandGraphResource
VkImageUsageFlags usage,
uint32_t layerCount);
void releaseImage(ContextVk *context);
void releaseImage(ContextVk *contextVk);
void releaseImage(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue);
void releaseStagingBuffer(ContextVk *context);
void releaseStagingBuffer(ContextVk *contextVk);
void releaseStagingBuffer(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue);
bool valid() const { return mImage.valid(); }
......@@ -658,7 +672,7 @@ class ImageHelper final : public CommandGraphResource
void resolve(ImageHelper *dest, const VkImageResolve &region, vk::CommandBuffer *commandBuffer);
// Data staging
void removeStagedUpdates(ContextVk *context, const gl::ImageIndex &index);
void removeStagedUpdates(ContextVk *contextVk, const gl::ImageIndex &index);
angle::Result stageSubresourceUpdate(ContextVk *contextVk,
const gl::ImageIndex &index,
......@@ -713,7 +727,7 @@ class ImageHelper final : public CommandGraphResource
// Flushes staged updates to a range of levels and layers from start to (but not including) end.
// Due to the nature of updates (done wholly to a VkImageSubresourceLayers), some unsolicited
// layers may also be updated.
angle::Result flushStagedUpdates(ContextVk *context,
angle::Result flushStagedUpdates(ContextVk *contextVk,
uint32_t levelStart,
uint32_t levelEnd,
uint32_t layerStart,
......@@ -722,7 +736,7 @@ class ImageHelper final : public CommandGraphResource
// Creates a command buffer and flushes all staged updates. This is used for one-time
// initialization of resources that we don't expect to accumulate further staged updates, such
// as with renderbuffers or surface images.
angle::Result flushAllStagedUpdates(ContextVk *context);
angle::Result flushAllStagedUpdates(ContextVk *contextVk);
bool hasStagedUpdates() const { return !mSubresourceUpdates.empty(); }
......@@ -780,7 +794,7 @@ class ImageHelper final : public CommandGraphResource
SubresourceUpdate(const VkClearValue &clearValue, const gl::ImageIndex &imageIndex);
SubresourceUpdate(const SubresourceUpdate &other);
void release(ContextVk *context);
void release(ContextVk *contextVk);
void release(DisplayVk *display, std::vector<GarbageObjectBase> *garbageQueue);
const VkImageSubresourceLayers &dstSubresource() const
......
......@@ -3646,7 +3646,8 @@ void SimpleStateChangeTest::drawToFboWithCulling(const GLenum frontFace, bool ea
GLTexture texture1;
ANGLE_GL_PROGRAM(greenProgram, essl1_shaders::vs::Simple(), essl1_shaders::fs::Green());
ANGLE_GL_PROGRAM(textureProgram, essl1_shaders::vs::Texture(), essl1_shaders::fs::Texture());
ANGLE_GL_PROGRAM(textureProgram, essl1_shaders::vs::Texture2D(),
essl1_shaders::fs::Texture2D());
bindTextureToFbo(fbo1, texture1);
......
......@@ -20,6 +20,7 @@
#include "libANGLE/renderer/vulkan/ProgramVk.h"
#include "test_utils/gl_raii.h"
#include "util/EGLWindow.h"
#include "util/shader_utils.h"
using namespace angle;
......@@ -165,23 +166,15 @@ TEST_P(VulkanUniformUpdatesTest, DescriptorPoolUniformAndTextureUpdates)
ASSERT_TRUE(IsVulkan());
// Initialize texture program.
constexpr char kVS[] = R"(attribute vec2 position;
varying mediump vec2 texCoord;
void main()
{
gl_Position = vec4(position, 0, 1);
texCoord = position * 0.5 + vec2(0.5);
})";
constexpr char kFS[] = R"(varying mediump vec2 texCoord;
constexpr char kFS[] = R"(varying mediump vec2 v_texCoord;
uniform sampler2D tex;
uniform mediump vec4 colorMask;
void main()
{
gl_FragColor = texture2D(tex, texCoord) * colorMask;
gl_FragColor = texture2D(tex, v_texCoord) * colorMask;
})";
ANGLE_GL_PROGRAM(program, kVS, kFS);
ANGLE_GL_PROGRAM(program, essl1_shaders::vs::Texture2D(), kFS);
glUseProgram(program);
limitMaxSets(program);
......@@ -215,24 +208,104 @@ void main()
// Draw with white.
glUniform1i(texLoc, 0);
glUniform4f(colorMaskLoc, 1.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, "position", 0.5f, 1.0f, true);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
// Draw with white masking out red.
glUniform4f(colorMaskLoc, 0.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, "position", 0.5f, 1.0f, true);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
// Draw with magenta.
glUniform1i(texLoc, 1);
glUniform4f(colorMaskLoc, 1.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, "position", 0.5f, 1.0f, true);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
// Draw with magenta masking out red.
glUniform4f(colorMaskLoc, 0.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, "position", 0.5f, 1.0f, true);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
swapBuffers();
ASSERT_GL_NO_ERROR();
}
}
// Uniform updates along with Texture regeneration.
TEST_P(VulkanUniformUpdatesTest, DescriptorPoolUniformAndTextureRegeneration)
{
ASSERT_TRUE(IsVulkan());
// Initialize texture program.
constexpr char kFS[] = R"(varying mediump vec2 v_texCoord;
uniform sampler2D tex;
uniform mediump vec4 colorMask;
void main()
{
gl_FragColor = texture2D(tex, v_texCoord) * colorMask;
})";
ANGLE_GL_PROGRAM(program, essl1_shaders::vs::Texture2D(), kFS);
glUseProgram(program);
limitMaxSets(program);
// Initialize large arrays of textures.
std::vector<GLTexture> whiteTextures;
std::vector<GLTexture> magentaTextures;
for (uint32_t iteration = 0; iteration < kMaxSetsForTesting * 2; ++iteration)
{
// Initialize white texture.
GLTexture whiteTexture;
InitTexture(GLColor::white, &whiteTexture);
ASSERT_GL_NO_ERROR();
whiteTextures.emplace_back(std::move(whiteTexture));
// Initialize magenta texture.
GLTexture magentaTexture;
InitTexture(GLColor::magenta, &magentaTexture);
ASSERT_GL_NO_ERROR();
magentaTextures.emplace_back(std::move(magentaTexture));
}
// Get uniform locations.
GLint texLoc = glGetUniformLocation(program, "tex");
ASSERT_NE(-1, texLoc);
GLint colorMaskLoc = glGetUniformLocation(program, "colorMask");
ASSERT_NE(-1, colorMaskLoc);
// Draw multiple times, each iteration will create a new descriptor set.
for (int outerIteration = 0; outerIteration < 2; ++outerIteration)
{
for (uint32_t iteration = 0; iteration < kMaxSetsForTesting * 2; ++iteration)
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, whiteTextures[iteration]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, magentaTextures[iteration]);
// Draw with white.
glUniform1i(texLoc, 0);
glUniform4f(colorMaskLoc, 1.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
// Draw with white masking out red.
glUniform4f(colorMaskLoc, 0.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
// Draw with magenta.
glUniform1i(texLoc, 1);
glUniform4f(colorMaskLoc, 1.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
// Draw with magenta masking out red.
glUniform4f(colorMaskLoc, 0.0f, 1.0f, 1.0f, 1.0f);
drawQuad(program, essl1_shaders::PositionAttrib(), 0.5f, 1.0f, true);
swapBuffers();
ASSERT_GL_NO_ERROR();
}
}
}
// Uniform updates along with Texture updates.
......
......@@ -30,11 +30,17 @@ class GLWrapper : angle::NonCopyable
// The move-constructor and move-assignment operators are necessary so that the data within a
// GLWrapper object can be relocated.
GLWrapper(GLWrapper &&rht) : mHandle(rht.mHandle) { rht.mHandle = 0u; }
GLWrapper(GLWrapper &&rht)
: mGenFunc(rht.mGenFunc), mDeleteFunc(rht.mDeleteFunc), mHandle(rht.mHandle)
{
rht.mHandle = 0u;
}
GLWrapper &operator=(GLWrapper &&rht)
{
if (this != &rht)
{
mGenFunc = rht.mGenFunc;
mDeleteFunc = rht.mDeleteFunc;
std::swap(mHandle, rht.mHandle);
}
return *this;
......
......@@ -277,6 +277,11 @@ const char *ColorUniform()
return "u_color";
}
const char *Texture2DUniform()
{
return "u_tex2D";
}
namespace vs
{
......@@ -318,16 +323,16 @@ void main()
// A shader that simply passes through attribute a_position, setting it to gl_Position and varying
// texcoord.
const char *Texture()
const char *Texture2D()
{
return R"(precision highp float;
attribute vec4 a_position;
varying vec2 texcoord;
varying vec2 v_texCoord;
void main()
{
gl_Position = vec4(a_position.xy, 0.0, 1.0);
texcoord = a_position.xy;
v_texCoord = a_position.xy * 0.5 + vec2(0.5);
})";
}
......@@ -400,15 +405,15 @@ void main()
}
// A shader that samples the texture.
const char *Texture()
const char *Texture2D()
{
return R"(precision highp float;
uniform sampler2D tex;
varying vec2 texcoord;
return R"(precision mediump float;
uniform sampler2D u_tex;
varying vec2 v_texCoord;
void main()
{
gl_FragColor = vec4(texture2D(tex, texcoord).rgb, 1.0);
gl_FragColor = texture2D(u_tex, v_texCoord);
})";
}
......
......@@ -52,6 +52,7 @@ namespace essl1_shaders
ANGLE_UTIL_EXPORT const char *PositionAttrib();
ANGLE_UTIL_EXPORT const char *ColorUniform();
ANGLE_UTIL_EXPORT const char *Texture2DUniform();
namespace vs
{
......@@ -66,7 +67,9 @@ ANGLE_UTIL_EXPORT const char *Simple();
// v_position.
ANGLE_UTIL_EXPORT const char *Passthrough();
ANGLE_UTIL_EXPORT const char *Texture();
// A shader that simply passes through attribute a_position, setting it to gl_Position and varying
// texcoord.
ANGLE_UTIL_EXPORT const char *Texture2D();
} // namespace vs
......@@ -90,7 +93,7 @@ ANGLE_UTIL_EXPORT const char *Green();
ANGLE_UTIL_EXPORT const char *Blue();
// A shader that samples the texture
ANGLE_UTIL_EXPORT const char *Texture();
ANGLE_UTIL_EXPORT const char *Texture2D();
} // namespace fs
} // namespace essl1_shaders
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment