Commit 1c335ef4 by Jim Stichnoth

Subzero: Support non sequentially consistent memory orderings for atomic ops.

The actual code lowering is unchanged, but the validation is made less strict to allow the additional orderings.

BUG= https://code.google.com/p/nativeclient/issues/detail?id=4029
R=jfb@chromium.org
Review URL: https://codereview.chromium.org/1017453007
parent 833f13f9
......@@ -195,7 +195,8 @@ void GlobalContext::translateFunctions() {
if (Func->hasError()) {
getErrorStatus()->assign(EC_Translation);
OstreamLocker L(this);
getStrDump() << "ICE translation error: " << Func->getError() << "\n";
getStrDump() << "ICE translation error: " << Func->getFunctionName()
<< ": " << Func->getError() << "\n";
Item = new EmitterWorkItem(Func->getSequenceNumber());
} else {
Func->getAssembler<>()->setInternal(Func->getInternal());
......
......@@ -233,9 +233,73 @@ const Intrinsics::FullIntrinsicInfo *Intrinsics::find(const IceString &Name,
return &it->second;
}
bool Intrinsics::VerifyMemoryOrder(uint64_t Order) {
// There is only one memory ordering for atomics allowed right now.
return Order == Intrinsics::MemoryOrderSequentiallyConsistent;
namespace {
// Returns whether PNaCl allows the given memory ordering in general.
// Only acquire, release, acq_rel, and seq_cst are permitted; every
// other value (including relaxed and consume) is rejected.
bool isMemoryOrderValidPNaCl(uint64_t Order) {
  return Order == Intrinsics::MemoryOrderAcquire ||
         Order == Intrinsics::MemoryOrderRelease ||
         Order == Intrinsics::MemoryOrderAcquireRelease ||
         Order == Intrinsics::MemoryOrderSequentiallyConsistent;
}
} // end of anonymous namespace
// Validates the memory-ordering argument(s) of an atomic intrinsic.
// For AtomicCmpxchg, Order is the "success" ordering and OrderOther is
// the "failure" ordering; for every other ID, OrderOther is unused and
// keeps its declared default of MemoryOrderInvalid.
bool Intrinsics::isMemoryOrderValid(IntrinsicID ID, uint64_t Order,
                                    uint64_t OrderOther) {
  // Reject orderings not allowed in PNaCl.
  if (!isMemoryOrderValidPNaCl(Order))
    return false;
  if (ID == AtomicCmpxchg && !isMemoryOrderValidPNaCl(OrderOther))
    return false;
  // Reject orderings not allowed by C++11.
  switch (ID) {
  default:
    // Only the atomic intrinsics carry memory-order arguments; any
    // other ID reaching this function is a caller bug.
    llvm_unreachable("isMemoryOrderValid: Unknown IntrinsicID");
    return false;
  case AtomicFence:
  case AtomicFenceAll:
  case AtomicRMW:
    // Any PNaCl-valid ordering is acceptable for these.
    return true;
  case AtomicCmpxchg:
    // Reject orderings that are disallowed by C++11 as invalid
    // combinations for cmpxchg.
    switch (OrderOther) {
    case MemoryOrderRelaxed:
    case MemoryOrderConsume:
    case MemoryOrderAcquire:
    case MemoryOrderSequentiallyConsistent:
      // C++11: the failure ordering may not be stronger than the
      // success ordering.
      if (OrderOther > Order)
        return false;
      // C++11: with a release success ordering, only a relaxed failure
      // ordering would be permitted.  NOTE(review): relaxed already
      // fails the PNaCl check above, so in practice a release success
      // ordering is rejected entirely here.
      if (Order == MemoryOrderRelease && OrderOther != MemoryOrderRelaxed)
        return false;
      return true;
    default:
      // Release / acq_rel are never valid failure orderings.
      return false;
    }
  case AtomicLoad:
    // A load may not have release semantics.
    switch (Order) {
    case MemoryOrderRelease:
    case MemoryOrderAcquireRelease:
      return false;
    default:
      return true;
    }
  case AtomicStore:
    // A store may not have consume/acquire semantics.
    switch (Order) {
    case MemoryOrderConsume:
    case MemoryOrderAcquire:
    case MemoryOrderAcquireRelease:
      return false;
    default:
      return true;
    }
  }
}
Intrinsics::ValidateCallValue
......
......@@ -91,7 +91,14 @@ public:
MemoryOrderNum // Invalid, keep last.
};
static bool VerifyMemoryOrder(uint64_t Order);
// Verify memory ordering rules for atomic intrinsics. For
// AtomicCmpxchg, Order is the "success" ordering and OrderOther is
// the "failure" ordering. Returns true if valid, false if invalid.
// TODO(stichnot,kschimpf): Perform memory order validation in the
// bitcode reader/parser, allowing LLVM and Subzero to share. See
// https://code.google.com/p/nativeclient/issues/detail?id=4126 .
static bool isMemoryOrderValid(IntrinsicID ID, uint64_t Order,
uint64_t OrderOther = MemoryOrderInvalid);
enum SideEffects { SideEffects_F = 0, SideEffects_T = 1 };
......
......@@ -2856,17 +2856,25 @@ void TargetX8632::lowerInsertElement(const InstInsertElement *Inst) {
}
}
namespace {
// Converts a ConstantInteger32 operand into its constant value, or
// MemoryOrderInvalid if the operand is not a ConstantInteger32 (i.e.
// the memory-order argument was not a compile-time constant).
uint64_t getConstantMemoryOrder(Operand *Opnd) {
  const auto *Const32 = llvm::dyn_cast<ConstantInteger32>(Opnd);
  if (Const32 == nullptr)
    return Intrinsics::MemoryOrderInvalid;
  return Const32->getValue();
}
} // end of anonymous namespace
void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
switch (Instr->getIntrinsicInfo().ID) {
switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
case Intrinsics::AtomicCmpxchg: {
if (!Intrinsics::VerifyMemoryOrder(
llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
Func->setError("Unexpected memory ordering (success) for AtomicCmpxchg");
return;
}
if (!Intrinsics::VerifyMemoryOrder(
llvm::cast<ConstantInteger32>(Instr->getArg(4))->getValue())) {
Func->setError("Unexpected memory ordering (failure) for AtomicCmpxchg");
if (!Intrinsics::isMemoryOrderValid(
ID, getConstantMemoryOrder(Instr->getArg(3)),
getConstantMemoryOrder(Instr->getArg(4)))) {
Func->setError("Unexpected memory ordering for AtomicCmpxchg");
return;
}
Variable *DestPrev = Instr->getDest();
......@@ -2879,8 +2887,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
return;
}
case Intrinsics::AtomicFence:
if (!Intrinsics::VerifyMemoryOrder(
llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue())) {
if (!Intrinsics::isMemoryOrderValid(
ID, getConstantMemoryOrder(Instr->getArg(0)))) {
Func->setError("Unexpected memory ordering for AtomicFence");
return;
}
......@@ -2925,8 +2933,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
case Intrinsics::AtomicLoad: {
// We require the memory address to be naturally aligned.
// Given that is the case, then normal loads are atomic.
if (!Intrinsics::VerifyMemoryOrder(
llvm::cast<ConstantInteger32>(Instr->getArg(1))->getValue())) {
if (!Intrinsics::isMemoryOrderValid(
ID, getConstantMemoryOrder(Instr->getArg(1)))) {
Func->setError("Unexpected memory ordering for AtomicLoad");
return;
}
......@@ -2958,8 +2966,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
return;
}
case Intrinsics::AtomicRMW:
if (!Intrinsics::VerifyMemoryOrder(
llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) {
if (!Intrinsics::isMemoryOrderValid(
ID, getConstantMemoryOrder(Instr->getArg(3)))) {
Func->setError("Unexpected memory ordering for AtomicRMW");
return;
}
......@@ -2969,8 +2977,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
Instr->getArg(1), Instr->getArg(2));
return;
case Intrinsics::AtomicStore: {
if (!Intrinsics::VerifyMemoryOrder(
llvm::cast<ConstantInteger32>(Instr->getArg(2))->getValue())) {
if (!Intrinsics::isMemoryOrderValid(
ID, getConstantMemoryOrder(Instr->getArg(2)))) {
Func->setError("Unexpected memory ordering for AtomicStore");
return;
}
......@@ -4485,6 +4493,8 @@ OperandX8632Mem *TargetX8632::FormMemoryOperand(Operand *Operand, Type Ty) {
Constant *Offset = llvm::dyn_cast<Constant>(Operand);
assert(Base || Offset);
if (Offset) {
// Make sure Offset is not undef.
Offset = llvm::cast<Constant>(legalize(Offset));
assert(llvm::isa<ConstantInteger32>(Offset) ||
llvm::isa<ConstantRelocatable>(Offset));
}
......
......@@ -19,8 +19,8 @@ declare void @llvm.nacl.atomic.fence(i32)
declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)
;;; Load
;;; Check unexpected memory order parameter (only sequential
;;; consistency == 6 is currently allowed).
;;; Check unexpected memory order parameter (release=4 and acq_rel=5
;;; are disallowed).
define i32 @error_atomic_load_8(i32 %iptr) {
entry:
......@@ -34,7 +34,7 @@ entry:
define i32 @error_atomic_load_16(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i16*
%i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 1)
%i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 4)
%r = zext i16 %i to i32
ret i32 %r
}
......@@ -43,13 +43,14 @@ entry:
define i64 @error_atomic_load_64(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 2)
%r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 5)
ret i64 %r
}
; CHECK: Unexpected memory ordering for AtomicLoad
;;; Store
;;; consume=2, acquire=3, acq_rel=5 are disallowed
define void @error_atomic_store_32(i32 %iptr, i32 %v) {
entry:
......@@ -70,19 +71,20 @@ entry:
define void @error_atomic_store_64_const(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 4)
call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 5)
ret void
}
; CHECK: Unexpected memory ordering for AtomicStore
;;; RMW
;;; Test atomic memory order and operation.
;;; Modes 3:6 allowed.
define i32 @error_atomic_rmw_add_8(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
%a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 5)
%a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 1)
%a_ext = zext i8 %a to i32
ret i32 %a_ext
}
......@@ -91,7 +93,7 @@ entry:
define i64 @error_atomic_rmw_add_64(i32 %iptr, i64 %v) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 4)
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 7)
ret i64 %a
}
; CHECK: Unexpected memory ordering for AtomicRMW
......@@ -131,7 +133,7 @@ entry:
i32 %desired, i32 0, i32 6)
ret i32 %old
}
; CHECK: Unexpected memory ordering (success) for AtomicCmpxchg
; CHECK: Unexpected memory ordering for AtomicCmpxchg
define i32 @error_atomic_cmpxchg_32_failure(i32 %iptr, i32 %expected, i32 %desired) {
entry:
......@@ -140,22 +142,22 @@ entry:
i32 %desired, i32 6, i32 0)
ret i32 %old
}
; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg
; CHECK: Unexpected memory ordering for AtomicCmpxchg
define i64 @error_atomic_cmpxchg_64_failure(i32 %iptr, i64 %expected, i64 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i64*
%old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
i64 %desired, i32 6, i32 3)
i64 %desired, i32 4, i32 1)
ret i64 %old
}
; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg
; CHECK: Unexpected memory ordering for AtomicCmpxchg
;;; Fence and is-lock-free.
define void @error_atomic_fence() {
entry:
call void @llvm.nacl.atomic.fence(i32 1)
call void @llvm.nacl.atomic.fence(i32 0)
ret void
}
; CHECK: Unexpected memory ordering for AtomicFence
......@@ -168,3 +170,58 @@ entry:
ret i32 %r
}
; CHECK: AtomicIsLockFree byte size should be compile-time const
;;; Test bad non-constant memory ordering values.  The memory-order
;;; argument of each atomic intrinsic must be a compile-time constant;
;;; a runtime value should trigger a translation error.
define i32 @error_atomic_load_8_nonconst(i32 %iptr) {
entry:
  %ptr = inttoptr i32 %iptr to i8*
  ; Memory-order argument is the non-constant %iptr.
  %i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 %iptr)
  %r = zext i8 %i to i32
  ret i32 %r
}
; CHECK: Unexpected memory ordering for AtomicLoad
define void @error_atomic_store_32_nonconst(i32 %iptr, i32 %v) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  ; Memory-order argument is the non-constant %v.
  call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 %v)
  ret void
}
; CHECK: Unexpected memory ordering for AtomicStore
define i32 @error_atomic_rmw_add_8_nonconst(i32 %iptr, i32 %v) {
entry:
  %trunc = trunc i32 %v to i8
  %ptr = inttoptr i32 %iptr to i8*
  ; Memory-order argument is the non-constant %iptr.
  %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 %iptr)
  %a_ext = zext i8 %a to i32
  ret i32 %a_ext
}
; CHECK: Unexpected memory ordering for AtomicRMW
define i32 @error_atomic_cmpxchg_32_success_nonconst_1(i32 %iptr, i32 %expected, i32 %desired) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  ; The "success" ordering (4th argument) is the non-constant %iptr.
  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
                                                i32 %desired, i32 %iptr, i32 6)
  ret i32 %old
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
define i32 @error_atomic_cmpxchg_32_success_nonconst_2(i32 %iptr, i32 %expected, i32 %desired) {
entry:
  %ptr = inttoptr i32 %iptr to i32*
  ; Here the "failure" ordering (5th argument) is the non-constant
  ; %iptr, despite the _success_ in the test name.
  %old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
                                                i32 %desired, i32 6, i32 %iptr)
  ret i32 %old
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
define void @error_atomic_fence_nonconst(i32 %v) {
entry:
  ; Memory-order argument is the non-constant %v.
  call void @llvm.nacl.atomic.fence(i32 %v)
  ret void
}
; CHECK: Unexpected memory ordering for AtomicFence
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment