Commit 1c335ef4 by Jim Stichnoth

Subzero: Support non sequentially consistent memory orderings for atomic ops.

The actual code lowering is unchanged, but the validation is made less strict to allow the additional orderings.

BUG= https://code.google.com/p/nativeclient/issues/detail?id=4029
R=jfb@chromium.org
Review URL: https://codereview.chromium.org/1017453007
parent 833f13f9
...@@ -195,7 +195,8 @@ void GlobalContext::translateFunctions() { ...@@ -195,7 +195,8 @@ void GlobalContext::translateFunctions() {
if (Func->hasError()) { if (Func->hasError()) {
getErrorStatus()->assign(EC_Translation); getErrorStatus()->assign(EC_Translation);
OstreamLocker L(this); OstreamLocker L(this);
getStrDump() << "ICE translation error: " << Func->getError() << "\n"; getStrDump() << "ICE translation error: " << Func->getFunctionName()
<< ": " << Func->getError() << "\n";
Item = new EmitterWorkItem(Func->getSequenceNumber()); Item = new EmitterWorkItem(Func->getSequenceNumber());
} else { } else {
Func->getAssembler<>()->setInternal(Func->getInternal()); Func->getAssembler<>()->setInternal(Func->getInternal());
......
...@@ -233,9 +233,73 @@ const Intrinsics::FullIntrinsicInfo *Intrinsics::find(const IceString &Name, ...@@ -233,9 +233,73 @@ const Intrinsics::FullIntrinsicInfo *Intrinsics::find(const IceString &Name,
return &it->second; return &it->second;
} }
bool Intrinsics::VerifyMemoryOrder(uint64_t Order) { namespace {
// There is only one memory ordering for atomics allowed right now.
return Order == Intrinsics::MemoryOrderSequentiallyConsistent; // Returns whether PNaCl allows the given memory ordering in general.
bool isMemoryOrderValidPNaCl(uint64_t Order) {
switch (Order) {
case Intrinsics::MemoryOrderAcquire:
case Intrinsics::MemoryOrderRelease:
case Intrinsics::MemoryOrderAcquireRelease:
case Intrinsics::MemoryOrderSequentiallyConsistent:
return true;
default:
return false;
}
}
} // end of anonymous namespace
bool Intrinsics::isMemoryOrderValid(IntrinsicID ID, uint64_t Order,
uint64_t OrderOther) {
// Reject orderings not allowed in PNaCl.
if (!isMemoryOrderValidPNaCl(Order))
return false;
if (ID == AtomicCmpxchg && !isMemoryOrderValidPNaCl(OrderOther))
return false;
// Reject orderings not allowed by C++11.
switch (ID) {
default:
llvm_unreachable("isMemoryOrderValid: Unknown IntrinsicID");
return false;
case AtomicFence:
case AtomicFenceAll:
case AtomicRMW:
return true;
case AtomicCmpxchg:
// Reject orderings that are disallowed by C++11 as invalid
// combinations for cmpxchg.
switch (OrderOther) {
case MemoryOrderRelaxed:
case MemoryOrderConsume:
case MemoryOrderAcquire:
case MemoryOrderSequentiallyConsistent:
if (OrderOther > Order)
return false;
if (Order == MemoryOrderRelease && OrderOther != MemoryOrderRelaxed)
return false;
return true;
default:
return false;
}
case AtomicLoad:
switch (Order) {
case MemoryOrderRelease:
case MemoryOrderAcquireRelease:
return false;
default:
return true;
}
case AtomicStore:
switch (Order) {
case MemoryOrderConsume:
case MemoryOrderAcquire:
case MemoryOrderAcquireRelease:
return false;
default:
return true;
}
}
} }
Intrinsics::ValidateCallValue Intrinsics::ValidateCallValue
......
...@@ -91,7 +91,14 @@ public: ...@@ -91,7 +91,14 @@ public:
MemoryOrderNum // Invalid, keep last. MemoryOrderNum // Invalid, keep last.
}; };
static bool VerifyMemoryOrder(uint64_t Order); // Verify memory ordering rules for atomic intrinsics. For
// AtomicCmpxchg, Order is the "success" ordering and OrderOther is
// the "failure" ordering. Returns true if valid, false if invalid.
// TODO(stichnot,kschimpf): Perform memory order validation in the
// bitcode reader/parser, allowing LLVM and Subzero to share. See
// https://code.google.com/p/nativeclient/issues/detail?id=4126 .
static bool isMemoryOrderValid(IntrinsicID ID, uint64_t Order,
uint64_t OrderOther = MemoryOrderInvalid);
enum SideEffects { SideEffects_F = 0, SideEffects_T = 1 }; enum SideEffects { SideEffects_F = 0, SideEffects_T = 1 };
......
...@@ -2856,17 +2856,25 @@ void TargetX8632::lowerInsertElement(const InstInsertElement *Inst) { ...@@ -2856,17 +2856,25 @@ void TargetX8632::lowerInsertElement(const InstInsertElement *Inst) {
} }
} }
namespace {

// Extracts the constant value from a ConstantInteger32 operand;
// yields Intrinsics::MemoryOrderInvalid for any other operand kind,
// so non-constant memory-order arguments fail validation downstream.
uint64_t getConstantMemoryOrder(Operand *Opnd) {
  if (const auto Const32 = llvm::dyn_cast<ConstantInteger32>(Opnd)) {
    return Const32->getValue();
  }
  return Intrinsics::MemoryOrderInvalid;
}

} // end of anonymous namespace
void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
switch (Instr->getIntrinsicInfo().ID) { switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
case Intrinsics::AtomicCmpxchg: { case Intrinsics::AtomicCmpxchg: {
if (!Intrinsics::VerifyMemoryOrder( if (!Intrinsics::isMemoryOrderValid(
llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) { ID, getConstantMemoryOrder(Instr->getArg(3)),
Func->setError("Unexpected memory ordering (success) for AtomicCmpxchg"); getConstantMemoryOrder(Instr->getArg(4)))) {
return; Func->setError("Unexpected memory ordering for AtomicCmpxchg");
}
if (!Intrinsics::VerifyMemoryOrder(
llvm::cast<ConstantInteger32>(Instr->getArg(4))->getValue())) {
Func->setError("Unexpected memory ordering (failure) for AtomicCmpxchg");
return; return;
} }
Variable *DestPrev = Instr->getDest(); Variable *DestPrev = Instr->getDest();
...@@ -2879,8 +2887,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -2879,8 +2887,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
return; return;
} }
case Intrinsics::AtomicFence: case Intrinsics::AtomicFence:
if (!Intrinsics::VerifyMemoryOrder( if (!Intrinsics::isMemoryOrderValid(
llvm::cast<ConstantInteger32>(Instr->getArg(0))->getValue())) { ID, getConstantMemoryOrder(Instr->getArg(0)))) {
Func->setError("Unexpected memory ordering for AtomicFence"); Func->setError("Unexpected memory ordering for AtomicFence");
return; return;
} }
...@@ -2925,8 +2933,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -2925,8 +2933,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
case Intrinsics::AtomicLoad: { case Intrinsics::AtomicLoad: {
// We require the memory address to be naturally aligned. // We require the memory address to be naturally aligned.
// Given that is the case, then normal loads are atomic. // Given that is the case, then normal loads are atomic.
if (!Intrinsics::VerifyMemoryOrder( if (!Intrinsics::isMemoryOrderValid(
llvm::cast<ConstantInteger32>(Instr->getArg(1))->getValue())) { ID, getConstantMemoryOrder(Instr->getArg(1)))) {
Func->setError("Unexpected memory ordering for AtomicLoad"); Func->setError("Unexpected memory ordering for AtomicLoad");
return; return;
} }
...@@ -2958,8 +2966,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -2958,8 +2966,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
return; return;
} }
case Intrinsics::AtomicRMW: case Intrinsics::AtomicRMW:
if (!Intrinsics::VerifyMemoryOrder( if (!Intrinsics::isMemoryOrderValid(
llvm::cast<ConstantInteger32>(Instr->getArg(3))->getValue())) { ID, getConstantMemoryOrder(Instr->getArg(3)))) {
Func->setError("Unexpected memory ordering for AtomicRMW"); Func->setError("Unexpected memory ordering for AtomicRMW");
return; return;
} }
...@@ -2969,8 +2977,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -2969,8 +2977,8 @@ void TargetX8632::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
Instr->getArg(1), Instr->getArg(2)); Instr->getArg(1), Instr->getArg(2));
return; return;
case Intrinsics::AtomicStore: { case Intrinsics::AtomicStore: {
if (!Intrinsics::VerifyMemoryOrder( if (!Intrinsics::isMemoryOrderValid(
llvm::cast<ConstantInteger32>(Instr->getArg(2))->getValue())) { ID, getConstantMemoryOrder(Instr->getArg(2)))) {
Func->setError("Unexpected memory ordering for AtomicStore"); Func->setError("Unexpected memory ordering for AtomicStore");
return; return;
} }
...@@ -4485,6 +4493,8 @@ OperandX8632Mem *TargetX8632::FormMemoryOperand(Operand *Operand, Type Ty) { ...@@ -4485,6 +4493,8 @@ OperandX8632Mem *TargetX8632::FormMemoryOperand(Operand *Operand, Type Ty) {
Constant *Offset = llvm::dyn_cast<Constant>(Operand); Constant *Offset = llvm::dyn_cast<Constant>(Operand);
assert(Base || Offset); assert(Base || Offset);
if (Offset) { if (Offset) {
// Make sure Offset is not undef.
Offset = llvm::cast<Constant>(legalize(Offset));
assert(llvm::isa<ConstantInteger32>(Offset) || assert(llvm::isa<ConstantInteger32>(Offset) ||
llvm::isa<ConstantRelocatable>(Offset)); llvm::isa<ConstantRelocatable>(Offset));
} }
......
...@@ -19,8 +19,8 @@ declare void @llvm.nacl.atomic.fence(i32) ...@@ -19,8 +19,8 @@ declare void @llvm.nacl.atomic.fence(i32)
declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*) declare i1 @llvm.nacl.atomic.is.lock.free(i32, i8*)
;;; Load ;;; Load
;;; Check unexpected memory order parameter (only sequential ;;; Check unexpected memory order parameter (release=4 and acq_rel=5
;;; consistency == 6 is currently allowed). ;;; are disallowed).
define i32 @error_atomic_load_8(i32 %iptr) { define i32 @error_atomic_load_8(i32 %iptr) {
entry: entry:
...@@ -34,7 +34,7 @@ entry: ...@@ -34,7 +34,7 @@ entry:
define i32 @error_atomic_load_16(i32 %iptr) { define i32 @error_atomic_load_16(i32 %iptr) {
entry: entry:
%ptr = inttoptr i32 %iptr to i16* %ptr = inttoptr i32 %iptr to i16*
%i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 1) %i = call i16 @llvm.nacl.atomic.load.i16(i16* %ptr, i32 4)
%r = zext i16 %i to i32 %r = zext i16 %i to i32
ret i32 %r ret i32 %r
} }
...@@ -43,13 +43,14 @@ entry: ...@@ -43,13 +43,14 @@ entry:
define i64 @error_atomic_load_64(i32 %iptr) { define i64 @error_atomic_load_64(i32 %iptr) {
entry: entry:
%ptr = inttoptr i32 %iptr to i64* %ptr = inttoptr i32 %iptr to i64*
%r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 2) %r = call i64 @llvm.nacl.atomic.load.i64(i64* %ptr, i32 5)
ret i64 %r ret i64 %r
} }
; CHECK: Unexpected memory ordering for AtomicLoad ; CHECK: Unexpected memory ordering for AtomicLoad
;;; Store ;;; Store
;;; consume=2, acquire=3, acq_rel=5 are disallowed
define void @error_atomic_store_32(i32 %iptr, i32 %v) { define void @error_atomic_store_32(i32 %iptr, i32 %v) {
entry: entry:
...@@ -70,19 +71,20 @@ entry: ...@@ -70,19 +71,20 @@ entry:
define void @error_atomic_store_64_const(i32 %iptr) { define void @error_atomic_store_64_const(i32 %iptr) {
entry: entry:
%ptr = inttoptr i32 %iptr to i64* %ptr = inttoptr i32 %iptr to i64*
call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 4) call void @llvm.nacl.atomic.store.i64(i64 12345678901234, i64* %ptr, i32 5)
ret void ret void
} }
; CHECK: Unexpected memory ordering for AtomicStore ; CHECK: Unexpected memory ordering for AtomicStore
;;; RMW ;;; RMW
;;; Test atomic memory order and operation. ;;; Test atomic memory order and operation.
;;; Modes 3:6 allowed.
define i32 @error_atomic_rmw_add_8(i32 %iptr, i32 %v) { define i32 @error_atomic_rmw_add_8(i32 %iptr, i32 %v) {
entry: entry:
%trunc = trunc i32 %v to i8 %trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8* %ptr = inttoptr i32 %iptr to i8*
%a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 5) %a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 1)
%a_ext = zext i8 %a to i32 %a_ext = zext i8 %a to i32
ret i32 %a_ext ret i32 %a_ext
} }
...@@ -91,7 +93,7 @@ entry: ...@@ -91,7 +93,7 @@ entry:
define i64 @error_atomic_rmw_add_64(i32 %iptr, i64 %v) { define i64 @error_atomic_rmw_add_64(i32 %iptr, i64 %v) {
entry: entry:
%ptr = inttoptr i32 %iptr to i64* %ptr = inttoptr i32 %iptr to i64*
%a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 4) %a = call i64 @llvm.nacl.atomic.rmw.i64(i32 1, i64* %ptr, i64 %v, i32 7)
ret i64 %a ret i64 %a
} }
; CHECK: Unexpected memory ordering for AtomicRMW ; CHECK: Unexpected memory ordering for AtomicRMW
...@@ -131,7 +133,7 @@ entry: ...@@ -131,7 +133,7 @@ entry:
i32 %desired, i32 0, i32 6) i32 %desired, i32 0, i32 6)
ret i32 %old ret i32 %old
} }
; CHECK: Unexpected memory ordering (success) for AtomicCmpxchg ; CHECK: Unexpected memory ordering for AtomicCmpxchg
define i32 @error_atomic_cmpxchg_32_failure(i32 %iptr, i32 %expected, i32 %desired) { define i32 @error_atomic_cmpxchg_32_failure(i32 %iptr, i32 %expected, i32 %desired) {
entry: entry:
...@@ -140,22 +142,22 @@ entry: ...@@ -140,22 +142,22 @@ entry:
i32 %desired, i32 6, i32 0) i32 %desired, i32 6, i32 0)
ret i32 %old ret i32 %old
} }
; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg ; CHECK: Unexpected memory ordering for AtomicCmpxchg
define i64 @error_atomic_cmpxchg_64_failure(i32 %iptr, i64 %expected, i64 %desired) { define i64 @error_atomic_cmpxchg_64_failure(i32 %iptr, i64 %expected, i64 %desired) {
entry: entry:
%ptr = inttoptr i32 %iptr to i64* %ptr = inttoptr i32 %iptr to i64*
%old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected, %old = call i64 @llvm.nacl.atomic.cmpxchg.i64(i64* %ptr, i64 %expected,
i64 %desired, i32 6, i32 3) i64 %desired, i32 4, i32 1)
ret i64 %old ret i64 %old
} }
; CHECK: Unexpected memory ordering (failure) for AtomicCmpxchg ; CHECK: Unexpected memory ordering for AtomicCmpxchg
;;; Fence and is-lock-free. ;;; Fence and is-lock-free.
define void @error_atomic_fence() { define void @error_atomic_fence() {
entry: entry:
call void @llvm.nacl.atomic.fence(i32 1) call void @llvm.nacl.atomic.fence(i32 0)
ret void ret void
} }
; CHECK: Unexpected memory ordering for AtomicFence ; CHECK: Unexpected memory ordering for AtomicFence
...@@ -168,3 +170,58 @@ entry: ...@@ -168,3 +170,58 @@ entry:
ret i32 %r ret i32 %r
} }
; CHECK: AtomicIsLockFree byte size should be compile-time const ; CHECK: AtomicIsLockFree byte size should be compile-time const
;;; Test bad non-constant memory ordering values.
; The memory-order argument is %iptr, not a compile-time constant, so
; lowering must report an error rather than assume a ConstantInteger32.
define i32 @error_atomic_load_8_nonconst(i32 %iptr) {
entry:
%ptr = inttoptr i32 %iptr to i8*
%i = call i8 @llvm.nacl.atomic.load.i8(i8* %ptr, i32 %iptr)
%r = zext i8 %i to i32
ret i32 %r
}
; CHECK: Unexpected memory ordering for AtomicLoad
; Non-constant memory-order argument (%v) for a store must be rejected.
define void @error_atomic_store_32_nonconst(i32 %iptr, i32 %v) {
entry:
%ptr = inttoptr i32 %iptr to i32*
call void @llvm.nacl.atomic.store.i32(i32 %v, i32* %ptr, i32 %v)
ret void
}
; CHECK: Unexpected memory ordering for AtomicStore
; Non-constant memory-order argument (%iptr) for an RMW must be rejected.
define i32 @error_atomic_rmw_add_8_nonconst(i32 %iptr, i32 %v) {
entry:
%trunc = trunc i32 %v to i8
%ptr = inttoptr i32 %iptr to i8*
%a = call i8 @llvm.nacl.atomic.rmw.i8(i32 1, i8* %ptr, i8 %trunc, i32 %iptr)
%a_ext = zext i8 %a to i32
ret i32 %a_ext
}
; CHECK: Unexpected memory ordering for AtomicRMW
; Non-constant "success" ordering (%iptr) for cmpxchg must be rejected.
define i32 @error_atomic_cmpxchg_32_success_nonconst_1(i32 %iptr, i32 %expected, i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
i32 %desired, i32 %iptr, i32 6)
ret i32 %old
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
; Non-constant "failure" ordering (%iptr) for cmpxchg must be rejected.
define i32 @error_atomic_cmpxchg_32_success_nonconst_2(i32 %iptr, i32 %expected, i32 %desired) {
entry:
%ptr = inttoptr i32 %iptr to i32*
%old = call i32 @llvm.nacl.atomic.cmpxchg.i32(i32* %ptr, i32 %expected,
i32 %desired, i32 6, i32 %iptr)
ret i32 %old
}
; CHECK: Unexpected memory ordering for AtomicCmpxchg
; Non-constant memory-order argument (%v) for a fence must be rejected.
define void @error_atomic_fence_nonconst(i32 %v) {
entry:
call void @llvm.nacl.atomic.fence(i32 %v)
ret void
}
; CHECK: Unexpected memory ordering for AtomicFence
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment