Commit 59465799 by Nicolas Capens Committed by Nicolas Capens

Remove Reactor/MutexLock.hpp

It was already unused. Bug: b/129403963 Change-Id: Idd4d7d1b64bb6380364e86823787ce54bc1cf8ab Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/39730 Presubmit-Ready: Nicolas Capens <nicolascapens@google.com> Tested-by: Nicolas Capens <nicolascapens@google.com> Kokoro-Presubmit: kokoro <noreply+kokoro@google.com> Reviewed-by: Ben Clayton <bclayton@google.com>
parent 6e3a387c
...@@ -19,7 +19,6 @@ ...@@ -19,7 +19,6 @@
#include "CPUID.hpp" #include "CPUID.hpp"
#include "ExecutableMemory.hpp" #include "ExecutableMemory.hpp"
#include "MutexLock.hpp"
#include "Thread.hpp" #include "Thread.hpp"
#include "x86.hpp" #include "x86.hpp"
......
// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef rr_MutexLock_hpp
#define rr_MutexLock_hpp
#include "Thread.hpp"
#if defined(__linux__)
// Use a pthread mutex on Linux. Since many processes may use Reactor
// at the same time it's best to just have the scheduler overhead.
# include <pthread.h>
namespace rr {
// Blocking mutual-exclusion lock backed by a POSIX pthread mutex.
// Used on Linux so that contending processes defer to the kernel
// scheduler instead of busy-waiting (see BackoffLock for other platforms).
class MutexLock
{
public:
	MutexLock()
	{
		pthread_mutex_init(&mutex, nullptr);
	}

	~MutexLock()
	{
		pthread_mutex_destroy(&mutex);
	}

	// Non-copyable: duplicating a pthread_mutex_t handle is undefined
	// behavior, and the implicitly-generated copy operations would do
	// exactly that. Deleting them turns misuse into a compile error.
	MutexLock(const MutexLock &) = delete;
	MutexLock &operator=(const MutexLock &) = delete;

	// Tries to acquire the lock without blocking.
	// Returns true if the lock was acquired, false if it is already held.
	bool attemptLock()
	{
		return pthread_mutex_trylock(&mutex) == 0;
	}

	// Blocks until the lock is acquired.
	void lock()
	{
		pthread_mutex_lock(&mutex);
	}

	// Releases the lock. Must only be called by the thread that holds it.
	void unlock()
	{
		pthread_mutex_unlock(&mutex);
	}

private:
	pthread_mutex_t mutex;
};
} // namespace rr
#else // !__linux__
# include <atomic>
namespace rr {
// Spin lock with exponential backoff, used on non-Linux platforms where
// busy-waiting is preferred over a kernel mutex. Acquisition spins with a
// growing number of nop() delays (nop() is presumably declared in
// Thread.hpp — not visible here) and yields the thread once the backoff
// ceiling is reached.
class BackoffLock
{
public:
	BackoffLock()
	{
		// Start unlocked. Assigns int 0, converted to atomic<bool> false.
		mutex = 0;
	}

	// Single non-blocking acquisition attempt.
	// The cheap isLocked() pre-check avoids the more expensive exchange()
	// (a read-modify-write) when the lock is observably held.
	bool attemptLock()
	{
		if(!isLocked())
		{
			if(mutex.exchange(true) == false)
			{
				return true;
			}
		}

		return false;
	}

	// Blocks (by spinning) until the lock is acquired. Each failed attempt
	// doubles the nop-delay up to a ceiling of 64 iterations, after which
	// the thread yields and the backoff resets to 1.
	void lock()
	{
		int backoff = 1;

		while(!attemptLock())
		{
			if(backoff <= 64)
			{
				// Burn `backoff` iterations of 35 nops each before retrying,
				// to reduce contention on the atomic. The unrolled nop block
				// is deliberate spin timing — do not fold into a loop.
				for(int i = 0; i < backoff; i++)
				{
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
					nop();
				}

				backoff *= 2;
			}
			else
			{
				// Backoff ceiling reached: let the scheduler run other threads.
				Thread::yield();

				backoff = 1;
			}
		};
	}

	// Releases the lock. The release store pairs with the acquire load in
	// isLocked() so that writes made under the lock are visible to the
	// next acquirer.
	void unlock()
	{
		mutex.store(false, std::memory_order_release);
	}

	// Returns the current lock state (acquire load; see unlock()).
	bool isLocked()
	{
		return mutex.load(std::memory_order_acquire);
	}

private:
	struct
	{
		// Ensure that the mutex variable is on its own 64-byte cache line to avoid false sharing
		// Padding must be public to avoid compiler warnings
		volatile int padding1[16];
		std::atomic<bool> mutex;
		volatile int padding2[15];
	};
};
using MutexLock = BackoffLock;
} // namespace rr
#endif // !__linux__
#endif // rr_MutexLock_hpp
...@@ -298,7 +298,6 @@ ...@@ -298,7 +298,6 @@
<ClInclude Include="LLVMReactor.hpp" /> <ClInclude Include="LLVMReactor.hpp" />
<ClInclude Include="LLVMReactorDebugInfo.hpp" /> <ClInclude Include="LLVMReactorDebugInfo.hpp" />
<ClInclude Include="ExecutableMemory.hpp" /> <ClInclude Include="ExecutableMemory.hpp" />
<ClInclude Include="MutexLock.hpp" />
<ClInclude Include="Nucleus.hpp" /> <ClInclude Include="Nucleus.hpp" />
<ClInclude Include="Print.hpp" /> <ClInclude Include="Print.hpp" />
<ClInclude Include="Reactor.hpp" /> <ClInclude Include="Reactor.hpp" />
......
...@@ -68,9 +68,6 @@ ...@@ -68,9 +68,6 @@
<ClInclude Include="ExecutableMemory.hpp"> <ClInclude Include="ExecutableMemory.hpp">
<Filter>Header Files</Filter> <Filter>Header Files</Filter>
</ClInclude> </ClInclude>
<ClInclude Include="MutexLock.hpp">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Thread.hpp"> <ClInclude Include="Thread.hpp">
<Filter>Header Files</Filter> <Filter>Header Files</Filter>
</ClInclude> </ClInclude>
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment