Commit ab82ea82 by Maxime Grégoire

Use std::mutex instead of sw::Resource for locking in the CPU kernels

Change-Id: Ic6aa0b26bb3295ac2ac6401472ec0106741db8cb
Reviewed-on: https://swiftshader-review.googlesource.com/3714
Reviewed-by: Maxime Grégoire <mgregoire@google.com>
Tested-by: Maxime Grégoire <mgregoire@google.com>
parent 32e814e0
...@@ -150,7 +150,7 @@ size_t CPUKernel::typeOffset(size_t &offset, size_t type_len) ...@@ -150,7 +150,7 @@ size_t CPUKernel::typeOffset(size_t &offset, size_t type_len)
llvm::Function *CPUKernel::callFunction() llvm::Function *CPUKernel::callFunction()
{ {
p_call_function_mutex->lock(sw::PRIVATE); p_call_function_mutex->lock(sw::PUBLIC);
// If we can reuse the same function between work groups, do it // If we can reuse the same function between work groups, do it
if(p_call_function) if(p_call_function)
...@@ -276,9 +276,6 @@ CPUKernelEvent::CPUKernelEvent(CPUDevice *device, KernelEvent *event) ...@@ -276,9 +276,6 @@ CPUKernelEvent::CPUKernelEvent(CPUDevice *device, KernelEvent *event)
: p_device(device), p_event(event), p_current_wg(0), p_finished_wg(0), : p_device(device), p_event(event), p_current_wg(0), p_finished_wg(0),
p_kernel_args(0) p_kernel_args(0)
{ {
// Mutex
p_mutex = new sw::Resource(0);
// Set current work group to (0, 0, ..., 0) // Set current work group to (0, 0, ..., 0)
std::memset(p_current_work_group, 0, event->work_dim() * sizeof(size_t)); std::memset(p_current_work_group, 0, event->work_dim() * sizeof(size_t));
...@@ -296,9 +293,6 @@ CPUKernelEvent::CPUKernelEvent(CPUDevice *device, KernelEvent *event) ...@@ -296,9 +293,6 @@ CPUKernelEvent::CPUKernelEvent(CPUDevice *device, KernelEvent *event)
CPUKernelEvent::~CPUKernelEvent() CPUKernelEvent::~CPUKernelEvent()
{ {
p_mutex->lock(sw::DESTRUCT);
p_mutex->unlock();
p_mutex->destruct();
if(p_kernel_args) if(p_kernel_args)
std::free(p_kernel_args); std::free(p_kernel_args);
...@@ -307,7 +301,7 @@ CPUKernelEvent::~CPUKernelEvent() ...@@ -307,7 +301,7 @@ CPUKernelEvent::~CPUKernelEvent()
bool CPUKernelEvent::reserve() bool CPUKernelEvent::reserve()
{ {
// Lock, this will be unlocked in takeInstance() // Lock, this will be unlocked in takeInstance()
p_mutex->lock(sw::PRIVATE); p_mutex.lock();
// Last work group if current == max - 1 // Last work group if current == max - 1
return (p_current_wg == p_num_wg - 1); return (p_current_wg == p_num_wg - 1);
...@@ -317,22 +311,22 @@ bool CPUKernelEvent::finished() ...@@ -317,22 +311,22 @@ bool CPUKernelEvent::finished()
{ {
bool rs; bool rs;
p_mutex->lock(sw::PRIVATE); p_mutex.lock();
rs = (p_finished_wg == p_num_wg); rs = (p_finished_wg == p_num_wg);
p_mutex->unlock(); p_mutex.unlock();
return rs; return rs;
} }
void CPUKernelEvent::workGroupFinished() void CPUKernelEvent::workGroupFinished()
{ {
p_mutex->lock(sw::PRIVATE); p_mutex.lock();
p_finished_wg++; p_finished_wg++;
p_mutex->unlock(); p_mutex.unlock();
} }
CPUKernelWorkGroup *CPUKernelEvent::takeInstance() CPUKernelWorkGroup *CPUKernelEvent::takeInstance()
...@@ -347,7 +341,7 @@ CPUKernelWorkGroup *CPUKernelEvent::takeInstance() ...@@ -347,7 +341,7 @@ CPUKernelWorkGroup *CPUKernelEvent::takeInstance()
p_current_wg += 1; p_current_wg += 1;
// Release event // Release event
p_mutex->unlock(); p_mutex.unlock();
return wg; return wg;
} }
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
//#include <llvm/ExecutionEngine/GenericValue.h> //#include <llvm/ExecutionEngine/GenericValue.h>
#include <vector> #include <vector>
#include <string> #include <string>
#include <mutex>
#include <stdint.h> #include <stdint.h>
...@@ -287,7 +288,7 @@ namespace Devices ...@@ -287,7 +288,7 @@ namespace Devices
size_t p_current_work_group[MAX_WORK_DIMS], size_t p_current_work_group[MAX_WORK_DIMS],
p_max_work_groups[MAX_WORK_DIMS]; p_max_work_groups[MAX_WORK_DIMS];
size_t p_current_wg, p_finished_wg, p_num_wg; size_t p_current_wg, p_finished_wg, p_num_wg;
sw::Resource *p_mutex; std::mutex p_mutex;
void *p_kernel_args; void *p_kernel_args;
}; };
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment