Commit 094974da by Ben Clayton

VkPipelineCache: Do not publicly expose internal mutexes

They make it unnecessarily easy to write code that violates the mutex lock requirements. Replace with the `getOrCreate` pattern already used in vk::Device, which keeps the locking internal. This change fixes a couple of places where we were accessing data without correct locking (real bugs). Bug: b/153194656 Change-Id: I132c450594f4042160b575197789bca7f1a5e25f Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/43650 Kokoro-Result: kokoro <noreply+kokoro@google.com> Tested-by: 's avatarBen Clayton <bclayton@google.com> Reviewed-by: 's avatarNicolas Capens <nicolascapens@google.com>
parent 4cdbb545
...@@ -86,8 +86,14 @@ public: ...@@ -86,8 +86,14 @@ public:
}; };
}; };
// getOrCreate() queries the cache for a Routine with the given key.
// If one is found, it is returned, otherwise createRoutine(key) is
// called, the returned Routine is added to the cache, and it is
// returned.
// Function must be a function of the signature:
// std::shared_ptr<rr::Routine>(const Key &)
template<typename Function> template<typename Function>
std::shared_ptr<rr::Routine> getOrCreate(const Key &key, Function createRoutine) std::shared_ptr<rr::Routine> getOrCreate(const Key &key, Function &&createRoutine)
{ {
std::lock_guard<std::mutex> lock(mutex); std::lock_guard<std::mutex> lock(mutex);
......
...@@ -483,21 +483,10 @@ void GraphicsPipeline::compileShaders(const VkAllocationCallbacks *pAllocator, c ...@@ -483,21 +483,10 @@ void GraphicsPipeline::compileShaders(const VkAllocationCallbacks *pAllocator, c
if(pPipelineCache) if(pPipelineCache)
{ {
PipelineCache &pipelineCache = *pPipelineCache; auto shader = pPipelineCache->getOrCreateShader(key, [&] {
{ return createShader(key, module, robustBufferAccess, device->getDebuggerContext());
std::unique_lock<std::mutex> lock(pipelineCache.getShaderMutex()); });
const std::shared_ptr<sw::SpirvShader> *spirvShader = pipelineCache[key];
if(!spirvShader)
{
auto shader = createShader(key, module, robustBufferAccess, device->getDebuggerContext());
setShader(pipelineStage, shader); setShader(pipelineStage, shader);
pipelineCache.insert(key, getShader(pipelineStage));
}
else
{
setShader(pipelineStage, *spirvShader);
}
}
} }
else else
{ {
...@@ -583,35 +572,14 @@ void ComputePipeline::compileShaders(const VkAllocationCallbacks *pAllocator, co ...@@ -583,35 +572,14 @@ void ComputePipeline::compileShaders(const VkAllocationCallbacks *pAllocator, co
stage.stage, stage.pName, module->getCode(), nullptr, 0, stage.pSpecializationInfo); stage.stage, stage.pName, module->getCode(), nullptr, 0, stage.pSpecializationInfo);
if(pPipelineCache) if(pPipelineCache)
{ {
PipelineCache &pipelineCache = *pPipelineCache; shader = pPipelineCache->getOrCreateShader(shaderKey, [&] {
{ return createShader(shaderKey, module, robustBufferAccess, device->getDebuggerContext());
std::unique_lock<std::mutex> lock(pipelineCache.getShaderMutex()); });
const std::shared_ptr<sw::SpirvShader> *spirvShader = pipelineCache[shaderKey];
if(!spirvShader)
{
shader = createShader(shaderKey, module, robustBufferAccess, device->getDebuggerContext());
pipelineCache.insert(shaderKey, shader);
}
else
{
shader = *spirvShader;
}
}
{
const PipelineCache::ComputeProgramKey programKey(shader.get(), layout); const PipelineCache::ComputeProgramKey programKey(shader.get(), layout);
std::unique_lock<std::mutex> lock(pipelineCache.getProgramMutex()); program = pPipelineCache->getOrCreateComputeProgram(programKey, [&] {
const std::shared_ptr<sw::ComputeProgram> *computeProgram = pipelineCache[programKey]; return createProgram(programKey);
if(!computeProgram) });
{
program = createProgram(programKey);
pipelineCache.insert(programKey, program);
}
else
{
program = *computeProgram;
}
}
} }
else else
{ {
......
...@@ -224,12 +224,14 @@ VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSr ...@@ -224,12 +224,14 @@ VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSr
PipelineCache *srcCache = Cast(pSrcCaches[i]); PipelineCache *srcCache = Cast(pSrcCaches[i]);
{ {
std::unique_lock<std::mutex> lock(spirvShadersMutex); std::unique_lock<std::mutex> thisLock(spirvShadersMutex);
std::unique_lock<std::mutex> srcLock(srcCache->spirvShadersMutex);
spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end()); spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end());
} }
{ {
std::unique_lock<std::mutex> lock(computeProgramsMutex); std::unique_lock<std::mutex> thisLock(computeProgramsMutex);
std::unique_lock<std::mutex> srcLock(srcCache->computeProgramsMutex);
computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end()); computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end());
} }
} }
...@@ -237,26 +239,4 @@ VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSr ...@@ -237,26 +239,4 @@ VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSr
return VK_SUCCESS; return VK_SUCCESS;
} }
// Looks up the SpirvShader cached under the given key.
// Returns a pointer to the cached entry, or nullptr if absent.
// NOTE(review): the map is unsynchronized here — per the surrounding code,
// the caller is expected to hold spirvShadersMutex; confirm at call sites.
const std::shared_ptr<sw::SpirvShader> *PipelineCache::operator[](const PipelineCache::SpirvShaderKey &key) const
{
	const auto entry = spirvShaders.find(key);
	if(entry == spirvShaders.end())
	{
		return nullptr;
	}
	return &entry->second;
}
// Stores (or replaces) the SpirvShader cached under the given key.
// NOTE(review): unsynchronized — caller is expected to hold
// spirvShadersMutex; confirm at call sites.
void PipelineCache::insert(const PipelineCache::SpirvShaderKey &key, const std::shared_ptr<sw::SpirvShader> &shader)
{
	spirvShaders.insert_or_assign(key, shader);
}
// Looks up the ComputeProgram cached under the given key.
// Returns a pointer to the cached entry, or nullptr if absent.
// NOTE(review): the map is unsynchronized here — per the surrounding code,
// the caller is expected to hold computeProgramsMutex; confirm at call sites.
const std::shared_ptr<sw::ComputeProgram> *PipelineCache::operator[](const PipelineCache::ComputeProgramKey &key) const
{
	const auto entry = computePrograms.find(key);
	if(entry == computePrograms.end())
	{
		return nullptr;
	}
	return &entry->second;
}
// Stores (or replaces) the ComputeProgram cached under the given key.
// NOTE(review): unsynchronized — caller is expected to hold
// computeProgramsMutex; confirm at call sites.
void PipelineCache::insert(const PipelineCache::ComputeProgramKey &key, const std::shared_ptr<sw::ComputeProgram> &computeProgram)
{
	computePrograms.insert_or_assign(key, computeProgram);
}
} // namespace vk } // namespace vk
...@@ -93,9 +93,13 @@ public: ...@@ -93,9 +93,13 @@ public:
const SpecializationInfo specializationInfo; const SpecializationInfo specializationInfo;
}; };
std::mutex &getShaderMutex() { return spirvShadersMutex; } // getOrCreateShader() queries the cache for a shader with the given key.
const std::shared_ptr<sw::SpirvShader> *operator[](const PipelineCache::SpirvShaderKey &key) const; // If one is found, it is returned, otherwise create() is called, the
void insert(const PipelineCache::SpirvShaderKey &key, const std::shared_ptr<sw::SpirvShader> &shader); // returned shader is added to the cache, and it is returned.
// Function must be a function of the signature:
// std::shared_ptr<sw::SpirvShader>()
template<typename Function>
inline std::shared_ptr<sw::SpirvShader> getOrCreateShader(const PipelineCache::SpirvShaderKey &key, Function &&create);
struct ComputeProgramKey struct ComputeProgramKey
{ {
...@@ -117,9 +121,14 @@ public: ...@@ -117,9 +121,14 @@ public:
const vk::PipelineLayout *layout; const vk::PipelineLayout *layout;
}; };
std::mutex &getProgramMutex() { return computeProgramsMutex; } // getOrCreateComputeProgram() queries the cache for a compute program with
const std::shared_ptr<sw::ComputeProgram> *operator[](const PipelineCache::ComputeProgramKey &key) const; // the given key.
void insert(const PipelineCache::ComputeProgramKey &key, const std::shared_ptr<sw::ComputeProgram> &computeProgram); // If one is found, it is returned, otherwise create() is called, the
// returned program is added to the cache, and it is returned.
// Function must be a function of the signature:
// std::shared_ptr<sw::ComputeProgram>()
template<typename Function>
inline std::shared_ptr<sw::ComputeProgram> getOrCreateComputeProgram(const PipelineCache::ComputeProgramKey &key, Function &&create);
private: private:
struct CacheHeader struct CacheHeader
...@@ -135,10 +144,10 @@ private: ...@@ -135,10 +144,10 @@ private:
uint8_t *data = nullptr; uint8_t *data = nullptr;
std::mutex spirvShadersMutex; std::mutex spirvShadersMutex;
std::map<SpirvShaderKey, std::shared_ptr<sw::SpirvShader>> spirvShaders; std::map<SpirvShaderKey, std::shared_ptr<sw::SpirvShader>> spirvShaders; // guarded by spirvShadersMutex
std::mutex computeProgramsMutex; std::mutex computeProgramsMutex;
std::map<ComputeProgramKey, std::shared_ptr<sw::ComputeProgram>> computePrograms; std::map<ComputeProgramKey, std::shared_ptr<sw::ComputeProgram>> computePrograms; // guarded by computeProgramsMutex
}; };
static inline PipelineCache *Cast(VkPipelineCache object) static inline PipelineCache *Cast(VkPipelineCache object)
...@@ -146,6 +155,32 @@ static inline PipelineCache *Cast(VkPipelineCache object) ...@@ -146,6 +155,32 @@ static inline PipelineCache *Cast(VkPipelineCache object)
return PipelineCache::Cast(object); return PipelineCache::Cast(object);
} }
// Returns the cached ComputeProgram for key, building and caching one via
// create() on a miss. Lookup, creation, and insertion all happen under
// computeProgramsMutex, so callers never touch the lock themselves.
template<typename Function>
std::shared_ptr<sw::ComputeProgram> PipelineCache::getOrCreateComputeProgram(const PipelineCache::ComputeProgramKey &key, Function &&create)
{
	std::unique_lock<std::mutex> lock(computeProgramsMutex);

	auto entry = computePrograms.find(key);
	if(entry == computePrograms.end())
	{
		// Cache miss: build the program and remember it for next time.
		auto program = create();
		computePrograms.emplace(key, program);
		return program;
	}
	return entry->second;
}
// Returns the cached SpirvShader for key, building and caching one via
// create() on a miss. Lookup, creation, and insertion all happen under
// spirvShadersMutex, so callers never touch the lock themselves.
template<typename Function>
std::shared_ptr<sw::SpirvShader> PipelineCache::getOrCreateShader(const PipelineCache::SpirvShaderKey &key, Function &&create)
{
	std::unique_lock<std::mutex> lock(spirvShadersMutex);

	auto entry = spirvShaders.find(key);
	if(entry == spirvShaders.end())
	{
		// Cache miss: build the shader and remember it for next time.
		auto shader = create();
		spirvShaders.emplace(key, shader);
		return shader;
	}
	return entry->second;
}
} // namespace vk } // namespace vk
#endif // VK_PIPELINE_CACHE_HPP_ #endif // VK_PIPELINE_CACHE_HPP_
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment