Commit 0eb5c839 by Sergey Ulanov Committed by Sergey Ulanov

[fuchsia] Simplify memory allocation

Previously, the Fuchsia-specific memory allocation logic was trying to deal with a non-page-aligned address returned from zx_vmar_map(), but that is no longer necessary because zx_vmar_map() is guaranteed to return a page-aligned address. It was also trying to unmap partial pages, but zx_vmar_unmap() now requires that len is page-aligned. This change fixes both issues by removing two redundant zx_vmar_unmap() calls. Also, deallocateExecutable() has been updated to pass a page-aligned size to zx_vmar_unmap(). This CL resolves test failures in some viz_unittests on Fuchsia. Bug: chromium:961455 Change-Id: Ib53e46af853802679a55dacc8546c3f67a3486c7 Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/34409 Tested-by: Sergey Ulanov <sergeyu@chromium.org> Kokoro-Presubmit: kokoro <noreply+kokoro@google.com> Reviewed-by: Nicolas Capens <nicolascapens@google.com>
parent 8cce70e1
...@@ -233,25 +233,10 @@ void *allocateExecutable(size_t bytes) ...@@ -233,25 +233,10 @@ void *allocateExecutable(size_t bytes)
return nullptr; return nullptr;
} }
zx_vaddr_t alignedReservation = roundUp(reservation, pageSize); // zx_vmar_map() returns page-aligned address.
mapping = reinterpret_cast<void*>(alignedReservation); ASSERT(roundUp(reservation, pageSize) == reservation);
// Unmap extra memory reserved before the block.
if (alignedReservation != reservation) {
size_t prefix_size = alignedReservation - reservation;
status =
zx_vmar_unmap(zx_vmar_root_self(), reservation, prefix_size);
ASSERT(status == ZX_OK);
length -= prefix_size;
}
// Unmap extra memory at the end. mapping = reinterpret_cast<void*>(reservation);
if (length > bytes) {
status = zx_vmar_unmap(
zx_vmar_root_self(), alignedReservation + bytes,
length - bytes);
ASSERT(status == ZX_OK);
}
#else #else
mapping = allocate(length, pageSize); mapping = allocate(length, pageSize);
#endif #endif
...@@ -265,10 +250,12 @@ void markExecutable(void *memory, size_t bytes) ...@@ -265,10 +250,12 @@ void markExecutable(void *memory, size_t bytes)
unsigned long oldProtection; unsigned long oldProtection;
VirtualProtect(memory, bytes, PAGE_EXECUTE_READ, &oldProtection); VirtualProtect(memory, bytes, PAGE_EXECUTE_READ, &oldProtection);
#elif defined(__Fuchsia__) #elif defined(__Fuchsia__)
size_t pageSize = memoryPageSize();
size_t length = roundUp(bytes, pageSize);
zx_status_t status = zx_vmar_protect( zx_status_t status = zx_vmar_protect(
zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE, zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_EXECUTE,
reinterpret_cast<zx_vaddr_t>(memory), bytes); reinterpret_cast<zx_vaddr_t>(memory), length);
ASSERT(status != ZX_OK); ASSERT(status == ZX_OK);
#else #else
mprotect(memory, bytes, PROT_READ | PROT_EXEC); mprotect(memory, bytes, PROT_READ | PROT_EXEC);
#endif #endif
...@@ -285,8 +272,11 @@ void deallocateExecutable(void *memory, size_t bytes) ...@@ -285,8 +272,11 @@ void deallocateExecutable(void *memory, size_t bytes)
size_t length = (bytes + pageSize - 1) & ~(pageSize - 1); size_t length = (bytes + pageSize - 1) & ~(pageSize - 1);
munmap(memory, length); munmap(memory, length);
#elif defined(__Fuchsia__) #elif defined(__Fuchsia__)
zx_vmar_unmap(zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), size_t pageSize = memoryPageSize();
bytes); size_t length = roundUp(bytes, pageSize);
zx_status_t status = zx_vmar_unmap(
zx_vmar_root_self(), reinterpret_cast<zx_vaddr_t>(memory), length);
ASSERT(status == ZX_OK);
#else #else
mprotect(memory, bytes, PROT_READ | PROT_WRITE); mprotect(memory, bytes, PROT_READ | PROT_WRITE);
deallocate(memory); deallocate(memory);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.