Commit 05459e06 by Tobin Ehlis Committed by Commit Bot

Vulkan: Adding custom pool allocator

Copied pool allocator used by compiler to common and hooking it up as custom allocator for CommandPools. Modified it to support reallocation. RendererVk now has a private poolAllocator and VkAllocationCallbacks struct. The allocation callbacks are initialized to static functions in RendererVk::initializeDevice() and then passed to CommandPool init() and destroy() functions. Using the pool allocator saves Command Pool/Buffer clean-up time, which was showing up as a bottleneck in some cases. Bug: angleproject:2951 Change-Id: I81aa8a7ec60397676fa722d6435029db27947ef4 Reviewed-on: https://chromium-review.googlesource.com/c/1409867 Commit-Queue: Tobin Ehlis <tobine@google.com> Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org> Reviewed-by: Jamie Madill <jmadill@chromium.org>
parent c81e7bfe
...@@ -166,7 +166,7 @@ angle_static_library("preprocessor") { ...@@ -166,7 +166,7 @@ angle_static_library("preprocessor") {
} }
config("translator_disable_pool_alloc") { config("translator_disable_pool_alloc") {
defines = [ "ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC" ] defines = [ "ANGLE_DISABLE_POOL_ALLOC" ]
} }
config("debug_annotations_config") { config("debug_annotations_config") {
......
//
// Copyright 2019 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// PoolAlloc.h:
// Defines the class interface for PoolAllocator and the Allocation
// class that it uses internally.
//
#ifndef POOLALLOC_H_
#define POOLALLOC_H_
#ifdef _DEBUG
# define GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
// typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
#include <stddef.h>
#include <string.h>
#include <vector>
#include "angleutils.h"
namespace angle
{
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
//
// Header that precedes every allocation handed out by the pool. Tracks
// the user-visible size and links allocations on a page together; in
// debug builds (GUARD_BLOCKS) it also brackets the user data with
// fill-pattern guard blocks so buffer overruns can be detected.
//
class Allocation
{
  public:
    //  size: byte count the user asked for (excludes header/guards).
    //  mem:  start of the raw allocation; this header object is expected
    //        to live at that address (placement-new'd there).
    //  prev: previously created Allocation on the same page, forming a
    //        singly-linked list used to validate guard blocks.
    Allocation(size_t size, unsigned char *mem, Allocation *prev = nullptr)
        : size(size), mem(mem), prevAlloc(prev)
    {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (kGuardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(), kGuardBlockBeginVal, kGuardBlockSize);
        memset(data(), kUserDataFill, size);
        memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
    }

    // Verify that both guard blocks still hold their fill values.
    void check() const
    {
        checkGuardBlock(preGuard(), kGuardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), kGuardBlockEndVal, "after");
    }

    // Run check() on this allocation and every prior one in the chain.
    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size)
    {
        return size + 2 * kGuardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char *offsetAllocation(unsigned char *m)
    {
        return m + kGuardBlockSize + headerSize();
    }

    // Return size of allocation.
    size_t getSize() const { return size; }

    // Set size of allocation (used when an allocation is resized in place).
    void setSize(size_t newSize)
    {
        size = newSize;
#ifdef GUARD_BLOCKS
        // Push post-guard block back now that size is updated
        memset(postGuard(), kGuardBlockEndVal, kGuardBlockSize);
#endif
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char *preGuard() const { return mem + headerSize(); }
    unsigned char *data() const { return preGuard() + kGuardBlockSize; }
    unsigned char *postGuard() const { return data() + size; }

    size_t size;            // size of the user data area
    unsigned char *mem;     // beginning of our allocation (pts to header)
    Allocation *prevAlloc;  // prior allocation in the chain

    // Fill patterns: distinct begin/end values let damage reports tell an
    // underrun from an overrun; kUserDataFill makes reads of fresh pool
    // memory obvious in debug builds.
    static constexpr unsigned char kGuardBlockBeginVal = 0xfb;
    static constexpr unsigned char kGuardBlockEndVal   = 0xfe;
    static constexpr unsigned char kUserDataFill       = 0xcd;
#ifdef GUARD_BLOCKS
    static constexpr size_t kGuardBlockSize = 16;
#else
    // Guard blocks compile away entirely when GUARD_BLOCKS is not defined.
    static constexpr size_t kGuardBlockSize = 0;
#endif
    // Unlike the old compiler-only allocator, the header is present even
    // without guard blocks so reallocate() can recover the stored size.
    inline static size_t headerSize() { return sizeof(Allocation); }
};
//
// There are several stacks. One is to track the pushing and popping
// of the user, and not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
class PoolAllocator : angle::NonCopyable
{
public:
static const int kDefaultAlignment = 16;
PoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = kDefaultAlignment);
PoolAllocator(PoolAllocator &&rhs) noexcept;
PoolAllocator &operator=(PoolAllocator &&);
//
// Don't call the destructor just to free up the memory, call pop()
//
~PoolAllocator();
//
// Call push() to establish a new place to pop memory to. Does not
// have to be called to get things started.
//
void push();
//
// Call pop() to free all memory allocated since the last call to push(),
// or if no last call to push, frees all memory since first allocation.
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory.
//
void *allocate(size_t numBytes);
//
// Call reallocate() to resize a previous allocation. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory
// where any contents from the original allocation will be preserved.
//
void *reallocate(void *originalAllocation, size_t numBytes);
//
// There is no deallocate. The point of this class is that
// deallocation can be skipped by the user of it, as the model
// of use is to simultaneously deallocate everything at once
// by calling pop(), and to not have to solve memory leak problems.
//
// Catch unwanted allocations.
// TODO(jmadill): Remove this when we remove the global allocator.
void lock();
void unlock();
private:
size_t alignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t alignmentMask;
#if !defined(ANGLE_DISABLE_POOL_ALLOC)
friend struct tHeader;
struct tHeader
{
tHeader(tHeader *nextPage, size_t pageCount)
: nextPage(nextPage),
pageCount(pageCount)
# ifdef GUARD_BLOCKS
,
lastAllocation(0)
# endif
{}
~tHeader()
{
# ifdef GUARD_BLOCKS
if (lastAllocation)
lastAllocation->checkAllocList();
# endif
}
tHeader *nextPage;
size_t pageCount;
Allocation *lastAllocation;
};
struct tAllocState
{
size_t offset;
tHeader *page;
};
typedef std::vector<tAllocState> tAllocStack;
// Track allocations if and only if we're using guard blocks
void *initializeAllocation(tHeader *block, unsigned char *memory, size_t numBytes)
{
// Init Allocation by default for reallocation support.
new (memory) Allocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<Allocation *>(memory);
return Allocation::offsetAllocation(memory);
}
Allocation *getAllocationHeader(void *memAllocation) const;
size_t pageSize; // granularity of allocation from the OS
size_t headerSkip; // amount of memory to skip to make room for the
// header (basically, size of header, rounded
// up to make it aligned
size_t currentPageOffset; // next offset in top of inUseList to allocate from
tHeader *freeList; // list of popped memory
tHeader *inUseList; // list of all memory currently being used
tAllocStack mStack; // stack of where to allocate from, to partition pool
int numCalls; // just an interesting statistic
size_t totalBytes; // just an interesting statistic
#else // !defined(ANGLE_DISABLE_POOL_ALLOC)
std::vector<std::vector<void *>> mStack;
#endif
bool mLocked;
};
} // namespace angle
#endif // POOLALLOC_H_
...@@ -170,7 +170,7 @@ namespace ...@@ -170,7 +170,7 @@ namespace
class TScopedPoolAllocator class TScopedPoolAllocator
{ {
public: public:
TScopedPoolAllocator(TPoolAllocator *allocator) : mAllocator(allocator) TScopedPoolAllocator(angle::PoolAllocator *allocator) : mAllocator(allocator)
{ {
mAllocator->push(); mAllocator->push();
SetGlobalPoolAllocator(mAllocator); SetGlobalPoolAllocator(mAllocator);
...@@ -182,7 +182,7 @@ class TScopedPoolAllocator ...@@ -182,7 +182,7 @@ class TScopedPoolAllocator
} }
private: private:
TPoolAllocator *mAllocator; angle::PoolAllocator *mAllocator;
}; };
class TScopedSymbolTableLevel class TScopedSymbolTableLevel
......
...@@ -66,7 +66,7 @@ class TShHandleBase ...@@ -66,7 +66,7 @@ class TShHandleBase
protected: protected:
// Memory allocator. Allocates and tracks memory required by the compiler. // Memory allocator. Allocates and tracks memory required by the compiler.
// Deallocates all memory when compiler is destructed. // Deallocates all memory when compiler is destructed.
TPoolAllocator allocator; angle::PoolAllocator allocator;
}; };
// //
......
...@@ -7,14 +7,7 @@ ...@@ -7,14 +7,7 @@
#include "compiler/translator/PoolAlloc.h" #include "compiler/translator/PoolAlloc.h"
#include <assert.h> #include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include "common/angleutils.h"
#include "common/debug.h"
#include "common/platform.h"
#include "common/tls.h" #include "common/tls.h"
#include "compiler/translator/InitializeGlobals.h"
TLSIndex PoolIndex = TLS_INVALID_INDEX; TLSIndex PoolIndex = TLS_INVALID_INDEX;
...@@ -34,327 +27,14 @@ void FreePoolIndex() ...@@ -34,327 +27,14 @@ void FreePoolIndex()
PoolIndex = TLS_INVALID_INDEX; PoolIndex = TLS_INVALID_INDEX;
} }
TPoolAllocator *GetGlobalPoolAllocator() angle::PoolAllocator *GetGlobalPoolAllocator()
{ {
assert(PoolIndex != TLS_INVALID_INDEX); assert(PoolIndex != TLS_INVALID_INDEX);
return static_cast<TPoolAllocator *>(GetTLSValue(PoolIndex)); return static_cast<angle::PoolAllocator *>(GetTLSValue(PoolIndex));
} }
void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator) void SetGlobalPoolAllocator(angle::PoolAllocator *poolAllocator)
{ {
assert(PoolIndex != TLS_INVALID_INDEX); assert(PoolIndex != TLS_INVALID_INDEX);
SetTLSValue(PoolIndex, poolAllocator); SetTLSValue(PoolIndex, poolAllocator);
} }
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
// Construct a pool.
//  growthIncrement: requested page size in bytes (clamped below to 4 KB).
//  allocationAlignment: requested alignment; normalized to a power of two
//                       no smaller than a pointer.
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment)
    : alignment(allocationAlignment),
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
      pageSize(growthIncrement),
      freeList(0),
      inUseList(0),
      numCalls(0),
      totalBytes(0),
#endif
      mLocked(false)
{
    //
    // Adjust alignment to be at least pointer aligned and
    // power of 2.
    //
    size_t minAlign = sizeof(void *);
    alignment &= ~(minAlign - 1);
    if (alignment < minAlign)
        alignment = minAlign;
    // Round the alignment up to the next power of two; alignmentMask is
    // the corresponding low-bit mask used for rounding offsets.
    size_t a = 1;
    while (a < alignment)
        a <<= 1;
    alignment     = a;
    alignmentMask = a - 1;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Don't allow page sizes we know are smaller than all common
    // OS page sizes.
    //
    if (pageSize < 4 * 1024)
        pageSize = 4 * 1024;

    //
    // A large currentPageOffset indicates a new page needs to
    // be obtained to allocate memory.
    //
    currentPageOffset = pageSize;

    //
    // Align header skip
    //
    headerSkip = minAlign;
    if (headerSkip < sizeof(tHeader))
    {
        headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
    }
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: start with one (empty) allocation list.
    mStack.push_back({});
#endif
}
TPoolAllocator::~TPoolAllocator()
{
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Return every in-use page to the OS. ~tHeader() walks the page's
    // allocation list to validate guard blocks (debug builds only) before
    // the raw storage is released.
    while (inUseList)
    {
        tHeader *next = inUseList->nextPage;
        inUseList->~tHeader();
        delete[] reinterpret_cast<char *>(inUseList);
        inUseList = next;
    }

    // We should not check the guard blocks
    // here, because we did it already when the block was
    // placed into the free list.
    //
    while (freeList)
    {
        tHeader *next = freeList->nextPage;
        delete[] reinterpret_cast<char *>(freeList);
        freeList = next;
    }
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: each allocation was a plain malloc, so
    // free them one by one.
    for (auto &allocs : mStack)
    {
        for (auto alloc : allocs)
        {
            free(alloc);
        }
    }
    mStack.clear();
#endif
}
// Support MSVC++ 6.0
//
// Out-of-class definitions for the guard-block constants. Distinct
// begin/end fill values let a damage report distinguish an underrun from
// an overrun; userDataFill is memset over fresh user data in debug builds.
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal   = 0xfe;
const unsigned char TAllocation::userDataFill       = 0xcd;

#ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
#else
// Guard blocks compile away entirely in non-debug builds.
const size_t TAllocation::guardBlockSize = 0;
#endif
//
// Check a single guard block for damage. 'blockMem' is the guard region,
// 'val' the fill byte it was initialized with, and 'locText' ("before" /
// "after") describes which side of the user data is being checked.
//
void TAllocation::checkGuardBlock(unsigned char *blockMem,
                                  unsigned char val,
                                  const char *locText) const
{
#ifdef GUARD_BLOCKS
    // Every byte must still hold the fill value; anything else means a
    // buffer under/overrun clobbered the guard region.
    for (size_t x = 0; x < guardBlockSize; x++)
    {
        if (blockMem[x] != val)
        {
            char assertMsg[80];
            // We don't print the assert message. It's here just to be helpful.
#    if defined(_MSC_VER)
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %Iu byte allocation at 0x%p\n", locText, size, data());
#    else
            snprintf(assertMsg, sizeof(assertMsg),
                     "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n", locText, size, data());
#    endif
            assert(0 && "PoolAlloc: Damage in guard block");
        }
    }
#endif
}
void TPoolAllocator::push()
{
#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Snapshot the current allocation point so pop() can rewind to it.
    tAllocState state = {currentPageOffset, inUseList};

    mStack.push_back(state);

    //
    // Indicate there is no current page to allocate from.
    //
    currentPageOffset = pageSize;
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: open a fresh per-level allocation list.
    mStack.push_back({});
#endif
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
    // Nothing to rewind to if no push() level is outstanding.
    if (mStack.size() < 1)
        return;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    tHeader *page     = mStack.back().page;
    currentPageOffset = mStack.back().offset;

    // Unwind the in-use list back to the page that was current at the
    // matching push(). Multi-page blocks go straight back to the OS;
    // single pages are cached on freeList for re-use.
    while (inUseList != page)
    {
        // invoke destructor to free allocation list
        inUseList->~tHeader();

        tHeader *nextInUse = inUseList->nextPage;
        if (inUseList->pageCount > 1)
            delete[] reinterpret_cast<char *>(inUseList);
        else
        {
            inUseList->nextPage = freeList;
            freeList            = inUseList;
        }
        inUseList = nextInUse;
    }

    mStack.pop_back();
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: free every malloc made at this level.
    for (auto &alloc : mStack.back())
    {
        free(alloc);
    }
    mStack.pop_back();
#endif
}
//
// Release every allocation made on this pool by unwinding the entire
// stack of push() marks, one level at a time.
//
void TPoolAllocator::popAll()
{
    while (!mStack.empty())
    {
        pop();
    }
}
// Acquire 'numBytes' of properly aligned memory from the pool.
// Returns 0 on failure. Three paths, ordered by expected frequency:
//   1. fits in the current page — bump-allocate;
//   2. too big for any single page — dedicated multi-page block;
//   3. needs a fresh page — recycle from freeList or get one from the OS.
void *TPoolAllocator::allocate(size_t numBytes)
{
    ASSERT(!mLocked);

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    //
    // Just keep some interesting statistics.
    //
    ++numCalls;
    totalBytes += numBytes;

    // If we are using guard blocks, all allocations are bracketed by
    // them: [guardblock][allocation][guardblock]. numBytes is how
    // much memory the caller asked for. allocationSize is the total
    // size including guard blocks. In release build,
    // guardBlockSize=0 and this all gets optimized away.
    size_t allocationSize = TAllocation::allocationSize(numBytes);
    // Detect integer overflow.
    if (allocationSize < numBytes)
        return 0;

    //
    // Do the allocation, most likely case first, for efficiency.
    // This step could be moved to be inline sometime.
    //
    if (allocationSize <= pageSize - currentPageOffset)
    {
        //
        // Safe to allocate from currentPageOffset.
        //
        unsigned char *memory = reinterpret_cast<unsigned char *>(inUseList) + currentPageOffset;
        currentPageOffset += allocationSize;
        // Keep the offset aligned for the next allocation.
        currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;

        return initializeAllocation(inUseList, memory, numBytes);
    }

    if (allocationSize > pageSize - headerSkip)
    {
        //
        // Do a multi-page allocation. Don't mix these with the others.
        // The OS is efficient at allocating and freeing multiple pages.
        //
        size_t numBytesToAlloc = allocationSize + headerSkip;
        // Detect integer overflow.
        if (numBytesToAlloc < allocationSize)
            return 0;

        tHeader *memory = reinterpret_cast<tHeader *>(::new char[numBytesToAlloc]);
        if (memory == 0)
            return 0;

        // Use placement-new to initialize header
        new (memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
        inUseList = memory;

        currentPageOffset = pageSize;  // make next allocation come from a new page

        // No guard blocks for multi-page allocations (yet)
        return reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(memory) + headerSkip);
    }

    //
    // Need a simple page to allocate from: recycle a popped page if one
    // is available, otherwise obtain a fresh one.
    //
    tHeader *memory;
    if (freeList)
    {
        memory   = freeList;
        freeList = freeList->nextPage;
    }
    else
    {
        memory = reinterpret_cast<tHeader *>(::new char[pageSize]);
        if (memory == 0)
            return 0;
    }

    // Use placement-new to initialize header
    new (memory) tHeader(inUseList, 1);
    inUseList = memory;

    unsigned char *ret = reinterpret_cast<unsigned char *>(inUseList) + headerSkip;
    currentPageOffset  = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;

    return initializeAllocation(inUseList, ret, numBytes);
#else  // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    // Pool allocation disabled: over-allocate with malloc and align the
    // returned pointer up within the block; the raw pointer is remembered
    // so pop() can free it.
    void *alloc = malloc(numBytes + alignmentMask);
    mStack.back().push_back(alloc);

    intptr_t intAlloc = reinterpret_cast<intptr_t>(alloc);
    intAlloc          = (intAlloc + alignmentMask) & ~alignmentMask;
    return reinterpret_cast<void *>(intAlloc);
#endif
}
// Put the pool into the locked state; allocate() asserts while locked.
void TPoolAllocator::lock()
{
    ASSERT(mLocked == false);
    mLocked = true;
}
// Leave the locked state; asserts that lock() was previously called.
void TPoolAllocator::unlock()
{
    ASSERT(mLocked == true);
    mLocked = false;
}
//
// Walk the chain of allocations ending at this one and validate the
// guard blocks around each by calling check().
//
void TAllocation::checkAllocList() const
{
    const TAllocation *alloc = this;
    while (alloc != 0)
    {
        alloc->check();
        alloc = alloc->prevAlloc;
    }
}
...@@ -12,237 +12,26 @@ ...@@ -12,237 +12,26 @@
#endif #endif
// //
// This header defines an allocator that can be used to efficiently // This header defines the pool_allocator class that allows STL containers
// allocate a large number of small requests for heap memory, with the // to use the angle::PoolAllocator class by using the pool_allocator
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
// typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument. // class as the allocator (second) template argument.
// //
// It also defines functions for managing the GlobalPoolAllocator used by the compiler.
//
#include <stddef.h> #include <stddef.h>
#include <string.h> #include <string.h>
#include <vector> #include <vector>
// If we are using guard blocks, we must track each indivual #include "common/PoolAlloc.h"
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
// Per-allocation tracking header used only when guard blocks are active:
// records the user size, brackets the user data with fill-pattern guard
// regions, and links allocations on a page into a checkable chain.
class TAllocation
{
  public:
    //  size: byte count the user asked for; mem: start of the raw
    //  allocation (where this header lives); prev: previously created
    //  TAllocation on the same page.
    TAllocation(size_t size, unsigned char *mem, TAllocation *prev = 0)
        : size(size), mem(mem), prevAlloc(prev)
    {
        // Allocations are bracketed:
        //    [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
        // This would be cleaner with if (guardBlockSize)..., but that
        // makes the compiler print warnings about 0 length memsets,
        // even with the if() protecting them.
#ifdef GUARD_BLOCKS
        memset(preGuard(), guardBlockBeginVal, guardBlockSize);
        memset(data(), userDataFill, size);
        memset(postGuard(), guardBlockEndVal, guardBlockSize);
#endif
    }

    // Verify that both guard blocks still hold their fill values.
    void check() const
    {
        checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
        checkGuardBlock(postGuard(), guardBlockEndVal, "after");
    }

    // Run check() on this allocation and every prior one in the chain.
    void checkAllocList() const;

    // Return total size needed to accommodate user buffer of 'size',
    // plus our tracking data.
    inline static size_t allocationSize(size_t size)
    {
        return size + 2 * guardBlockSize + headerSize();
    }

    // Offset from surrounding buffer to get to user data buffer.
    inline static unsigned char *offsetAllocation(unsigned char *m)
    {
        return m + guardBlockSize + headerSize();
    }

  private:
    void checkGuardBlock(unsigned char *blockMem, unsigned char val, const char *locText) const;

    // Find offsets to pre and post guard blocks, and user data buffer
    unsigned char *preGuard() const { return mem + headerSize(); }
    unsigned char *data() const { return preGuard() + guardBlockSize; }
    unsigned char *postGuard() const { return data() + size; }

    size_t size;             // size of the user data area
    unsigned char *mem;      // beginning of our allocation (pts to header)
    TAllocation *prevAlloc;  // prior allocation in the chain

    // Support MSVC++ 6.0
    const static unsigned char guardBlockBeginVal;
    const static unsigned char guardBlockEndVal;
    const static unsigned char userDataFill;
    const static size_t guardBlockSize;
#ifdef GUARD_BLOCKS
    inline static size_t headerSize() { return sizeof(TAllocation); }
#else
    // Without guard blocks the header is never written, so it costs nothing.
    inline static size_t headerSize() { return 0; }
#endif
};
//
// There are several stacks. One is to track the pushing and popping
// of the user, and not yet implemented. The others are simply a
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
// TPoolAllocator: the compiler's pool allocator. Pages obtained from the
// OS are bump-allocated; individual allocations are never freed — whole
// regions are released at once via pop()/popAll().
class TPoolAllocator
{
  public:
    TPoolAllocator(int growthIncrement = 8 * 1024, int allocationAlignment = 16);

    //
    // Don't call the destructor just to free up the memory, call pop()
    //
    ~TPoolAllocator();

    //
    // Call push() to establish a new place to pop memory to. Does not
    // have to be called to get things started.
    //
    void push();

    //
    // Call pop() to free all memory allocated since the last call to push(),
    // or if no last call to push, frees all memory since first allocation.
    //
    void pop();

    //
    // Call popAll() to free all memory allocated.
    //
    void popAll();

    //
    // Call allocate() to actually acquire memory. Returns 0 if no memory
    // available, otherwise a properly aligned pointer to 'numBytes' of memory.
    //
    void *allocate(size_t numBytes);

    //
    // There is no deallocate. The point of this class is that
    // deallocation can be skipped by the user of it, as the model
    // of use is to simultaneously deallocate everything at once
    // by calling pop(), and to not have to solve memory leak problems.
    //

    // Catch unwanted allocations.
    // TODO(jmadill): Remove this when we remove the global allocator.
    void lock();
    void unlock();

  private:
    size_t alignment;  // all returned allocations will be aligned at
                       // this granularity, which will be a power of 2
    size_t alignmentMask;

#if !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    friend struct tHeader;

    // Header at the start of every page obtained from the OS; links pages
    // into the freeList / inUseList chains.
    struct tHeader
    {
        tHeader(tHeader *nextPage, size_t pageCount)
            : nextPage(nextPage),
              pageCount(pageCount)
#    ifdef GUARD_BLOCKS
              ,
              lastAllocation(0)
#    endif
        {}

        ~tHeader()
        {
#    ifdef GUARD_BLOCKS
            // Validate guard blocks of every allocation on this page.
            if (lastAllocation)
                lastAllocation->checkAllocList();
#    endif
        }

        tHeader *nextPage;
        size_t pageCount;
#    ifdef GUARD_BLOCKS
        TAllocation *lastAllocation;
#    endif
    };

    // Snapshot of the allocation point, recorded by push() and restored
    // by pop().
    struct tAllocState
    {
        size_t offset;
        tHeader *page;
    };
    typedef std::vector<tAllocState> tAllocStack;

    // Track allocations if and only if we're using guard blocks
    void *initializeAllocation(tHeader *block, unsigned char *memory, size_t numBytes)
    {
#    ifdef GUARD_BLOCKS
        new (memory) TAllocation(numBytes, memory, block->lastAllocation);
        block->lastAllocation = reinterpret_cast<TAllocation *>(memory);
#    endif

        // This is optimized entirely away if GUARD_BLOCKS is not defined.
        return TAllocation::offsetAllocation(memory);
    }

    size_t pageSize;           // granularity of allocation from the OS
    size_t headerSkip;         // amount of memory to skip to make room for the
                               // header (basically, size of header, rounded
                               // up to make it aligned
    size_t currentPageOffset;  // next offset in top of inUseList to allocate from
    tHeader *freeList;         // list of popped memory
    tHeader *inUseList;        // list of all memory currently being used
    tAllocStack mStack;        // stack of where to allocate from, to partition pool

    int numCalls;       // just an interesting statistic
    size_t totalBytes;  // just an interesting statistic
#else                   // !defined(ANGLE_TRANSLATOR_DISABLE_POOL_ALLOC)
    std::vector<std::vector<void *>> mStack;
#endif

    TPoolAllocator &operator=(const TPoolAllocator &);  // dont allow assignment operator
    TPoolAllocator(const TPoolAllocator &);             // dont allow default copy constructor
    bool mLocked;
};
// //
// There could potentially be many pools with pops happening at // There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop // different times. But a simple use is to have a global pop
// with everyone using the same global allocator. // with everyone using the same global allocator.
// //
extern TPoolAllocator *GetGlobalPoolAllocator(); extern angle::PoolAllocator *GetGlobalPoolAllocator();
extern void SetGlobalPoolAllocator(TPoolAllocator *poolAllocator); extern void SetGlobalPoolAllocator(angle::PoolAllocator *poolAllocator);
// //
// This STL compatible allocator is intended to be used as the allocator // This STL compatible allocator is intended to be used as the allocator
...@@ -311,7 +100,7 @@ class pool_allocator ...@@ -311,7 +100,7 @@ class pool_allocator
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); } size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; } size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
TPoolAllocator &getAllocator() const { return *GetGlobalPoolAllocator(); } angle::PoolAllocator &getAllocator() const { return *GetGlobalPoolAllocator(); }
}; };
#endif // COMPILER_TRANSLATOR_POOLALLOC_H_ #endif // COMPILER_TRANSLATOR_POOLALLOC_H_
...@@ -458,6 +458,54 @@ void ChoosePhysicalDevice(const std::vector<VkPhysicalDevice> &physicalDevices, ...@@ -458,6 +458,54 @@ void ChoosePhysicalDevice(const std::vector<VkPhysicalDevice> &physicalDevices,
// Initially dumping the command graphs is disabled. // Initially dumping the command graphs is disabled.
constexpr bool kEnableCommandGraphDiagnostics = false; constexpr bool kEnableCommandGraphDiagnostics = false;
// Custom allocation functions
VKAPI_ATTR void *VKAPI_CALL PoolAllocationFunction(void *pUserData,
size_t size,
size_t alignment,
VkSystemAllocationScope allocationScope)
{
angle::PoolAllocator *poolAllocator = static_cast<angle::PoolAllocator *>(pUserData);
ASSERT((angle::PoolAllocator::kDefaultAlignment % alignment) == 0);
return poolAllocator->allocate(size);
}
VKAPI_ATTR void *VKAPI_CALL PoolReallocationFunction(void *pUserData,
void *pOriginal,
size_t size,
size_t alignment,
VkSystemAllocationScope allocationScope)
{
angle::PoolAllocator *poolAllocator = static_cast<angle::PoolAllocator *>(pUserData);
return poolAllocator->reallocate(pOriginal, size);
}
VKAPI_ATTR void VKAPI_CALL PoolFreeFunction(void *pUserData, void *pMemory) {}
VKAPI_ATTR void VKAPI_CALL
PoolInternalAllocationNotification(void *pUserData,
size_t size,
VkInternalAllocationType allocationType,
VkSystemAllocationScope allocationScope)
{}
VKAPI_ATTR void VKAPI_CALL PoolInternalFreeNotification(void *pUserData,
size_t size,
VkInternalAllocationType allocationType,
VkSystemAllocationScope allocationScope)
{}
void InitPoolAllocationCallbacks(angle::PoolAllocator *poolAllocator,
VkAllocationCallbacks *allocationCallbacks)
{
allocationCallbacks->pUserData = static_cast<void *>(poolAllocator);
allocationCallbacks->pfnAllocation = &PoolAllocationFunction;
allocationCallbacks->pfnReallocation = &PoolReallocationFunction;
allocationCallbacks->pfnFree = &PoolFreeFunction;
allocationCallbacks->pfnInternalAllocation = &PoolInternalAllocationNotification;
allocationCallbacks->pfnInternalFree = &PoolInternalFreeNotification;
}
} // anonymous namespace } // anonymous namespace
// CommandBatch implementation. // CommandBatch implementation.
...@@ -466,20 +514,25 @@ RendererVk::CommandBatch::CommandBatch() = default; ...@@ -466,20 +514,25 @@ RendererVk::CommandBatch::CommandBatch() = default;
RendererVk::CommandBatch::~CommandBatch() = default; RendererVk::CommandBatch::~CommandBatch() = default;
RendererVk::CommandBatch::CommandBatch(CommandBatch &&other) RendererVk::CommandBatch::CommandBatch(CommandBatch &&other)
: commandPool(std::move(other.commandPool)), fence(std::move(other.fence)), serial(other.serial) : commandPool(std::move(other.commandPool)),
poolAllocator(std::move(other.poolAllocator)),
fence(std::move(other.fence)),
serial(other.serial)
{} {}
RendererVk::CommandBatch &RendererVk::CommandBatch::operator=(CommandBatch &&other) RendererVk::CommandBatch &RendererVk::CommandBatch::operator=(CommandBatch &&other)
{ {
std::swap(commandPool, other.commandPool); std::swap(commandPool, other.commandPool);
std::swap(poolAllocator, other.poolAllocator);
std::swap(fence, other.fence); std::swap(fence, other.fence);
std::swap(serial, other.serial); std::swap(serial, other.serial);
return *this; return *this;
} }
void RendererVk::CommandBatch::destroy(VkDevice device) void RendererVk::CommandBatch::destroy(VkDevice device,
const VkAllocationCallbacks *allocationCallbacks)
{ {
commandPool.destroy(device); commandPool.destroy(device, allocationCallbacks);
fence.destroy(device); fence.destroy(device);
} }
...@@ -535,7 +588,7 @@ void RendererVk::onDestroy(vk::Context *context) ...@@ -535,7 +588,7 @@ void RendererVk::onDestroy(vk::Context *context)
if (mCommandPool.valid()) if (mCommandPool.valid())
{ {
mCommandPool.destroy(mDevice); mCommandPool.destroy(mDevice, &mAllocationCallbacks);
} }
if (mDevice) if (mDevice)
...@@ -877,13 +930,14 @@ angle::Result RendererVk::initializeDevice(DisplayVk *displayVk, uint32_t queueF ...@@ -877,13 +930,14 @@ angle::Result RendererVk::initializeDevice(DisplayVk *displayVk, uint32_t queueF
vkGetDeviceQueue(mDevice, mCurrentQueueFamilyIndex, 0, &mQueue); vkGetDeviceQueue(mDevice, mCurrentQueueFamilyIndex, 0, &mQueue);
InitPoolAllocationCallbacks(&mPoolAllocator, &mAllocationCallbacks);
// Initialize the command pool now that we know the queue family index. // Initialize the command pool now that we know the queue family index.
VkCommandPoolCreateInfo commandPoolInfo = {}; VkCommandPoolCreateInfo commandPoolInfo = {};
commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; commandPoolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; commandPoolInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
commandPoolInfo.queueFamilyIndex = mCurrentQueueFamilyIndex; commandPoolInfo.queueFamilyIndex = mCurrentQueueFamilyIndex;
ANGLE_VK_TRY(displayVk, mCommandPool.init(mDevice, commandPoolInfo)); ANGLE_VK_TRY(displayVk, mCommandPool.init(mDevice, commandPoolInfo, &mAllocationCallbacks));
// Initialize the vulkan pipeline cache. // Initialize the vulkan pipeline cache.
ANGLE_TRY(initPipelineCache(displayVk)); ANGLE_TRY(initPipelineCache(displayVk));
...@@ -1234,7 +1288,7 @@ void RendererVk::freeAllInFlightResources() ...@@ -1234,7 +1288,7 @@ void RendererVk::freeAllInFlightResources()
ASSERT(status == VK_SUCCESS || status == VK_ERROR_DEVICE_LOST); ASSERT(status == VK_SUCCESS || status == VK_ERROR_DEVICE_LOST);
} }
batch.fence.destroy(mDevice); batch.fence.destroy(mDevice);
batch.commandPool.destroy(mDevice); batch.commandPool.destroy(mDevice, &mAllocationCallbacks);
} }
mInFlightCommands.clear(); mInFlightCommands.clear();
...@@ -1264,7 +1318,7 @@ angle::Result RendererVk::checkCompletedCommands(vk::Context *context) ...@@ -1264,7 +1318,7 @@ angle::Result RendererVk::checkCompletedCommands(vk::Context *context)
mLastCompletedQueueSerial = batch.serial; mLastCompletedQueueSerial = batch.serial;
batch.fence.destroy(mDevice); batch.fence.destroy(mDevice);
batch.commandPool.destroy(mDevice); batch.commandPool.destroy(mDevice, &mAllocationCallbacks);
++finishedCount; ++finishedCount;
} }
...@@ -1295,7 +1349,7 @@ angle::Result RendererVk::submitFrame(vk::Context *context, ...@@ -1295,7 +1349,7 @@ angle::Result RendererVk::submitFrame(vk::Context *context,
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.flags = 0; fenceInfo.flags = 0;
vk::Scoped<CommandBatch> scopedBatch(mDevice); vk::ScopedCustomAllocation<CommandBatch> scopedBatch(mDevice, &mAllocationCallbacks);
CommandBatch &batch = scopedBatch.get(); CommandBatch &batch = scopedBatch.get();
ANGLE_VK_TRY(context, batch.fence.init(mDevice, fenceInfo)); ANGLE_VK_TRY(context, batch.fence.init(mDevice, fenceInfo));
...@@ -1303,6 +1357,7 @@ angle::Result RendererVk::submitFrame(vk::Context *context, ...@@ -1303,6 +1357,7 @@ angle::Result RendererVk::submitFrame(vk::Context *context,
// Store this command buffer in the in-flight list. // Store this command buffer in the in-flight list.
batch.commandPool = std::move(mCommandPool); batch.commandPool = std::move(mCommandPool);
batch.poolAllocator = std::move(mPoolAllocator);
batch.serial = mCurrentQueueSerial; batch.serial = mCurrentQueueSerial;
mInFlightCommands.emplace_back(scopedBatch.release()); mInFlightCommands.emplace_back(scopedBatch.release());
...@@ -1327,12 +1382,13 @@ angle::Result RendererVk::submitFrame(vk::Context *context, ...@@ -1327,12 +1382,13 @@ angle::Result RendererVk::submitFrame(vk::Context *context,
// Reallocate the command pool for next frame.
// TODO(jmadill): Consider reusing command pools.
InitPoolAllocationCallbacks(&mPoolAllocator, &mAllocationCallbacks);
VkCommandPoolCreateInfo poolInfo = {}; VkCommandPoolCreateInfo poolInfo = {};
poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
poolInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT; poolInfo.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT;
poolInfo.queueFamilyIndex = mCurrentQueueFamilyIndex; poolInfo.queueFamilyIndex = mCurrentQueueFamilyIndex;
ANGLE_VK_TRY(context, mCommandPool.init(mDevice, poolInfo)); ANGLE_VK_TRY(context, mCommandPool.init(mDevice, poolInfo, &mAllocationCallbacks));
return angle::Result::Continue; return angle::Result::Continue;
} }
...@@ -1675,7 +1731,7 @@ angle::Result RendererVk::synchronizeCpuGpuTime(vk::Context *context) ...@@ -1675,7 +1731,7 @@ angle::Result RendererVk::synchronizeCpuGpuTime(vk::Context *context)
// //
// Post-submission work Begin execution // Post-submission work Begin execution
// //
// ???? Write timstamp Tgpu // ???? Write timestamp Tgpu
// //
// ???? End execution // ???? End execution
// //
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <vulkan/vulkan.h> #include <vulkan/vulkan.h>
#include <memory> #include <memory>
#include "common/PoolAlloc.h"
#include "common/angleutils.h" #include "common/angleutils.h"
#include "libANGLE/BlobCache.h" #include "libANGLE/BlobCache.h"
#include "libANGLE/Caps.h" #include "libANGLE/Caps.h"
...@@ -255,6 +256,8 @@ class RendererVk : angle::NonCopyable ...@@ -255,6 +256,8 @@ class RendererVk : angle::NonCopyable
uint32_t mCurrentQueueFamilyIndex; uint32_t mCurrentQueueFamilyIndex;
VkDevice mDevice; VkDevice mDevice;
vk::CommandPool mCommandPool; vk::CommandPool mCommandPool;
angle::PoolAllocator mPoolAllocator;
VkAllocationCallbacks mAllocationCallbacks;
SerialFactory mQueueSerialFactory; SerialFactory mQueueSerialFactory;
SerialFactory mShaderSerialFactory; SerialFactory mShaderSerialFactory;
Serial mLastCompletedQueueSerial; Serial mLastCompletedQueueSerial;
...@@ -270,9 +273,10 @@ class RendererVk : angle::NonCopyable ...@@ -270,9 +273,10 @@ class RendererVk : angle::NonCopyable
CommandBatch(CommandBatch &&other); CommandBatch(CommandBatch &&other);
CommandBatch &operator=(CommandBatch &&other); CommandBatch &operator=(CommandBatch &&other);
void destroy(VkDevice device); void destroy(VkDevice device, const VkAllocationCallbacks *allocationCallbacks);
vk::CommandPool commandPool; vk::CommandPool commandPool;
angle::PoolAllocator poolAllocator;
vk::Fence fence; vk::Fence fence;
Serial serial; Serial serial;
}; };
......
...@@ -270,19 +270,21 @@ VkDevice Context::getDevice() const ...@@ -270,19 +270,21 @@ VkDevice Context::getDevice() const
// CommandPool implementation. // CommandPool implementation.
CommandPool::CommandPool() {} CommandPool::CommandPool() {}
void CommandPool::destroy(VkDevice device) void CommandPool::destroy(VkDevice device, const VkAllocationCallbacks *allocationCallbacks)
{ {
if (valid()) if (valid())
{ {
vkDestroyCommandPool(device, mHandle, nullptr); vkDestroyCommandPool(device, mHandle, allocationCallbacks);
mHandle = VK_NULL_HANDLE; mHandle = VK_NULL_HANDLE;
} }
} }
// Creates the VkCommandPool, routing driver host allocations through
// |allocationCallbacks| (may be nullptr for the default allocator).
// Must not already hold a valid handle.
VkResult CommandPool::init(VkDevice device,
                           const VkCommandPoolCreateInfo &createInfo,
                           const VkAllocationCallbacks *allocationCallbacks)
{
    ASSERT(!valid());
    return vkCreateCommandPool(device, &createInfo, allocationCallbacks, &mHandle);
}
// CommandBuffer implementation. // CommandBuffer implementation.
......
...@@ -290,9 +290,11 @@ class CommandPool final : public WrappedObject<CommandPool, VkCommandPool> ...@@ -290,9 +290,11 @@ class CommandPool final : public WrappedObject<CommandPool, VkCommandPool>
public: public:
CommandPool(); CommandPool();
void destroy(VkDevice device); void destroy(VkDevice device, const VkAllocationCallbacks *allocationCallbacks);
VkResult init(VkDevice device, const VkCommandPoolCreateInfo &createInfo); VkResult init(VkDevice device,
const VkCommandPoolCreateInfo &createInfo,
const VkAllocationCallbacks *allocationCallbacks);
}; };
class Pipeline final : public WrappedObject<Pipeline, VkPipeline> class Pipeline final : public WrappedObject<Pipeline, VkPipeline>
...@@ -317,7 +319,7 @@ class CommandBuffer : public WrappedObject<CommandBuffer, VkCommandBuffer> ...@@ -317,7 +319,7 @@ class CommandBuffer : public WrappedObject<CommandBuffer, VkCommandBuffer>
VkCommandBuffer releaseHandle(); VkCommandBuffer releaseHandle();
// This is used for normal pool allocated command buffers. It resets the handle.
void destroy(VkDevice device); void destroy(VkDevice device);
// This is used in conjunction with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT. // This is used in conjunction with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT.
...@@ -791,6 +793,28 @@ class Scoped final : angle::NonCopyable ...@@ -791,6 +793,28 @@ class Scoped final : angle::NonCopyable
T mVar; T mVar;
}; };
// RAII wrapper for an object that is destroyed with custom allocation
// callbacks. On scope exit the held object's destroy() is invoked with the
// device and callbacks captured at construction; call release() first to
// transfer ownership out. T must provide:
//   void destroy(VkDevice, const VkAllocationCallbacks *)
// and must remain safe to destroy() after being moved from.
template <typename T>
class ScopedCustomAllocation final : angle::NonCopyable
{
  public:
    ScopedCustomAllocation(VkDevice device, const VkAllocationCallbacks *allocationCBs)
        : mDevice(device), mCallbacks(allocationCBs)
    {}
    ~ScopedCustomAllocation() { mObject.destroy(mDevice, mCallbacks); }

    // Accessors for the owned object.
    T &get() { return mObject; }
    const T &get() const { return mObject; }

    // Moves the owned object out to the caller.
    T &&release() { return std::move(mObject); }

  private:
    VkDevice mDevice;
    const VkAllocationCallbacks *mCallbacks;
    T mObject;
};
// This is a very simple RefCount class that has no autoreleasing. Used in the descriptor set and // This is a very simple RefCount class that has no autoreleasing. Used in the descriptor set and
// pipeline layout caches. // pipeline layout caches.
template <typename T> template <typename T>
......
...@@ -17,6 +17,8 @@ libangle_common_sources = [ ...@@ -17,6 +17,8 @@ libangle_common_sources = [
"src/common/PackedEnums.h", "src/common/PackedEnums.h",
"src/common/PackedGLEnums_autogen.cpp", "src/common/PackedGLEnums_autogen.cpp",
"src/common/PackedGLEnums_autogen.h", "src/common/PackedGLEnums_autogen.h",
"src/common/PoolAlloc.cpp",
"src/common/PoolAlloc.h",
"src/common/aligned_memory.cpp", "src/common/aligned_memory.cpp",
"src/common/aligned_memory.h", "src/common/aligned_memory.h",
"src/common/angleutils.cpp", "src/common/angleutils.cpp",
......
...@@ -30,7 +30,7 @@ class ImmutableStringBuilderTest : public testing::Test ...@@ -30,7 +30,7 @@ class ImmutableStringBuilderTest : public testing::Test
allocator.pop(); allocator.pop();
} }
TPoolAllocator allocator; angle::PoolAllocator allocator;
}; };
// Test writing a 32-bit signed int as hexadecimal using ImmutableStringBuilder. // Test writing a 32-bit signed int as hexadecimal using ImmutableStringBuilder.
......
...@@ -131,7 +131,7 @@ class IntermNodeTest : public testing::Test ...@@ -131,7 +131,7 @@ class IntermNodeTest : public testing::Test
} }
private: private:
TPoolAllocator allocator; angle::PoolAllocator allocator;
int mUniqueIndex; int mUniqueIndex;
}; };
......
...@@ -61,7 +61,7 @@ class RemovePowTest : public testing::Test ...@@ -61,7 +61,7 @@ class RemovePowTest : public testing::Test
sh::TranslatorGLSL *mTranslatorGLSL; sh::TranslatorGLSL *mTranslatorGLSL;
TIntermNode *mASTRoot; TIntermNode *mASTRoot;
TPoolAllocator allocator; angle::PoolAllocator allocator;
}; };
// Check if there's a pow() node anywhere in the tree. // Check if there's a pow() node anywhere in the tree.
......
...@@ -19,7 +19,7 @@ namespace sh ...@@ -19,7 +19,7 @@ namespace sh
// Verify that mangled name matches between a vector/matrix TType and a corresponding StaticType. // Verify that mangled name matches between a vector/matrix TType and a corresponding StaticType.
TEST(Type, VectorAndMatrixMangledNameConsistent) TEST(Type, VectorAndMatrixMangledNameConsistent)
{ {
TPoolAllocator allocator; angle::PoolAllocator allocator;
allocator.push(); allocator.push();
SetGlobalPoolAllocator(&allocator); SetGlobalPoolAllocator(&allocator);
......
...@@ -197,7 +197,7 @@ bool IsPlatformAvailable(const CompilerParameters &param) ...@@ -197,7 +197,7 @@ bool IsPlatformAvailable(const CompilerParameters &param)
case SH_HLSL_4_0_FL9_3_OUTPUT: case SH_HLSL_4_0_FL9_3_OUTPUT:
case SH_HLSL_3_0_OUTPUT: case SH_HLSL_3_0_OUTPUT:
{ {
TPoolAllocator allocator; angle::PoolAllocator allocator;
InitializePoolIndex(); InitializePoolIndex();
allocator.push(); allocator.push();
SetGlobalPoolAllocator(&allocator); SetGlobalPoolAllocator(&allocator);
...@@ -259,7 +259,7 @@ class CompilerPerfTest : public ANGLEPerfTest, ...@@ -259,7 +259,7 @@ class CompilerPerfTest : public ANGLEPerfTest,
const char *mTestShader; const char *mTestShader;
ShBuiltInResources mResources; ShBuiltInResources mResources;
TPoolAllocator mAllocator; angle::PoolAllocator mAllocator;
sh::TCompiler *mTranslator; sh::TCompiler *mTranslator;
}; };
......
...@@ -53,7 +53,7 @@ class ShaderCompileTreeTest : public testing::Test ...@@ -53,7 +53,7 @@ class ShaderCompileTreeTest : public testing::Test
private: private:
TranslatorESSL *mTranslator; TranslatorESSL *mTranslator;
TPoolAllocator mAllocator; angle::PoolAllocator mAllocator;
}; };
// Returns true if the node is some kind of a zero node - either constructor or a constant union // Returns true if the node is some kind of a zero node - either constructor or a constant union
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment