Commit 3bf335f6 by John Porto

Subzero. RAII NaCl Bundling.

This CL introduces the TargetLowering::AutoBundle type, which allows RAII-style bundle emission. As part of the CL, all uses of TargetLowering::_bundle_lock() and TargetLowering::_bundle_unlock() were replaced with uses of the newly introduced type.

BUG=
R=sehr@chromium.org, stichnot@chromium.org

Review URL: https://codereview.chromium.org/1585843007 .
parent 5403f5dc
...@@ -248,6 +248,25 @@ void TargetLowering::staticInit(GlobalContext *Ctx) { ...@@ -248,6 +248,25 @@ void TargetLowering::staticInit(GlobalContext *Ctx) {
TargetLowering::TargetLowering(Cfg *Func) TargetLowering::TargetLowering(Cfg *Func)
: Func(Func), Ctx(Func->getContext()), Context() {} : Func(Func), Ctx(Func->getContext()), Context() {}
// Opens a NaCl bundle in RAII style: records on Target that a bundle is
// active and, only when sandboxed code generation was requested, emits the
// bundle_lock pseudo-instruction. The matching unlock is emitted by the
// destructor.
TargetLowering::AutoBundle::AutoBundle(TargetLowering *Target,
InstBundleLock::Option Option)
: Target(Target),
// NeedSandboxing is latched at construction so the destructor emits
// bundle_unlock exactly when the constructor emitted bundle_lock.
NeedSandboxing(Target->Ctx->getFlags().getUseSandboxing()) {
// Nested bundles are disallowed; AutoBundling is the nesting guard. Set it
// before emitting so the invariant holds for the whole bundle's lifetime.
assert(!Target->AutoBundling);
Target->AutoBundling = true;
if (NeedSandboxing) {
Target->_bundle_lock(Option);
}
}
// Closes the bundle opened by the constructor: clears the nesting guard and
// emits bundle_unlock iff the constructor emitted bundle_lock (i.e., iff
// sandboxing was enabled at construction time).
TargetLowering::AutoBundle::~AutoBundle() {
assert(Target->AutoBundling);
Target->AutoBundling = false;
if (NeedSandboxing) {
Target->_bundle_unlock();
}
}
void TargetLowering::genTargetHelperCalls() { void TargetLowering::genTargetHelperCalls() {
for (CfgNode *Node : Func->getNodes()) { for (CfgNode *Node : Func->getNodes()) {
Context.init(Node); Context.init(Node);
......
...@@ -297,7 +297,43 @@ public: ...@@ -297,7 +297,43 @@ public:
virtual ~TargetLowering() = default; virtual ~TargetLowering() = default;
private:
// This control variable is used by AutoBundle (RAII-style bundle
// locking/unlocking) to prevent nested bundles.
bool AutoBundling = false;
// _bundle_lock(), and _bundle_unlock(), were made private to force subtargets
// to use the AutoBundle helper.
// Inserts the bundle_lock pseudo-instruction at the current insertion point.
// Kept private so subtargets must go through the AutoBundle RAII helper,
// which guarantees a matching unlock and prevents nesting.
void
_bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) {
Context.insert<InstBundleLock>(BundleOption);
}
// Inserts the bundle_unlock pseudo-instruction; counterpart of _bundle_lock().
void _bundle_unlock() { Context.insert<InstBundleUnlock>(); }
protected: protected:
/// AutoBundle provides RAII-style bundling. Sub-targets are expected to use
/// it when emitting NaCl Bundles to ensure a matching bundle_unlock() is
/// always emitted, and to prevent nested bundles.
///
/// AutoBundle objects will emit a _bundle_lock during construction (but only
/// if sandboxed code generation was requested), and a bundle_unlock() during
/// destruction. By carefully scoping objects of this type, Subtargets can
/// ensure proper bundle emission.
class AutoBundle {
// Non-default-constructible and non-copyable: each object must be tied to
// exactly one Target and one lock/unlock pair.
AutoBundle() = delete;
AutoBundle(const AutoBundle &) = delete;
AutoBundle &operator=(const AutoBundle &) = delete;
public:
explicit AutoBundle(TargetLowering *Target, InstBundleLock::Option Option =
InstBundleLock::Opt_None);
~AutoBundle();
private:
// Declaration order matters: NeedSandboxing's ctor-initializer reads
// Target, so Target must be declared (and therefore initialized) first.
TargetLowering *const Target;
const bool NeedSandboxing;
};
explicit TargetLowering(Cfg *Func); explicit TargetLowering(Cfg *Func);
// Applies command line filters to TypeToRegisterSet array. // Applies command line filters to TypeToRegisterSet array.
static void static void
...@@ -394,11 +430,6 @@ protected: ...@@ -394,11 +430,6 @@ protected:
InstCall *makeHelperCall(const IceString &Name, Variable *Dest, InstCall *makeHelperCall(const IceString &Name, Variable *Dest,
SizeT MaxSrcs); SizeT MaxSrcs);
void
_bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) {
Context.insert<InstBundleLock>(BundleOption);
}
void _bundle_unlock() { Context.insert<InstBundleUnlock>(); }
void _set_dest_redefined() { Context.getLastInserted()->setDestRedefined(); } void _set_dest_redefined() { Context.getLastInserted()->setDestRedefined(); }
bool shouldOptimizeMemIntrins(); bool shouldOptimizeMemIntrins();
......
...@@ -1771,7 +1771,6 @@ void TargetARM32::postLowerLegalization() { ...@@ -1771,7 +1771,6 @@ void TargetARM32::postLowerLegalization() {
CurInstr->setDeleted(); CurInstr->setDeleted();
} }
} else if (auto *StrInstr = llvm::dyn_cast<InstARM32Str>(CurInstr)) { } else if (auto *StrInstr = llvm::dyn_cast<InstARM32Str>(CurInstr)) {
Sandboxer Bundle(this);
if (OperandARM32Mem *LegalMem = Legalizer.legalizeMemOperand( if (OperandARM32Mem *LegalMem = Legalizer.legalizeMemOperand(
llvm::cast<OperandARM32Mem>(StrInstr->getSrc(1)))) { llvm::cast<OperandARM32Mem>(StrInstr->getSrc(1)))) {
Sandboxer(this).str(llvm::cast<Variable>(CurInstr->getSrc(0)), Sandboxer(this).str(llvm::cast<Variable>(CurInstr->getSrc(0)),
...@@ -6193,17 +6192,9 @@ void TargetARM32::ComputationTracker::recordProducers(CfgNode *Node) { ...@@ -6193,17 +6192,9 @@ void TargetARM32::ComputationTracker::recordProducers(CfgNode *Node) {
TargetARM32::Sandboxer::Sandboxer(TargetARM32 *Target, TargetARM32::Sandboxer::Sandboxer(TargetARM32 *Target,
InstBundleLock::Option BundleOption) InstBundleLock::Option BundleOption)
: Target(Target) { : Bundler(Target, BundleOption), Target(Target) {}
if (Target->NeedSandboxing) {
Target->_bundle_lock(BundleOption);
}
}
TargetARM32::Sandboxer::~Sandboxer() { TargetARM32::Sandboxer::~Sandboxer() {}
if (Target->NeedSandboxing) {
Target->_bundle_unlock();
}
}
namespace { namespace {
OperandARM32FlexImm *indirectBranchBicMask(Cfg *Func) { OperandARM32FlexImm *indirectBranchBicMask(Cfg *Func) {
......
...@@ -942,6 +942,7 @@ protected: ...@@ -942,6 +942,7 @@ protected:
void sub_sp(Operand *SubAmount); void sub_sp(Operand *SubAmount);
private: private:
AutoBundle Bundler;
TargetARM32 *Target; TargetARM32 *Target;
}; };
......
...@@ -146,15 +146,15 @@ void TargetX8632::_sub_sp(Operand *Adjustment) { ...@@ -146,15 +146,15 @@ void TargetX8632::_sub_sp(Operand *Adjustment) {
} }
void TargetX8632::lowerIndirectJump(Variable *JumpTarget) { void TargetX8632::lowerIndirectJump(Variable *JumpTarget) {
AutoBundle _(this);
if (NeedSandboxing) { if (NeedSandboxing) {
_bundle_lock();
const SizeT BundleSize = const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes(); 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(JumpTarget, Ctx->getConstantInt32(~(BundleSize - 1))); _and(JumpTarget, Ctx->getConstantInt32(~(BundleSize - 1)));
} }
_jmp(JumpTarget); _jmp(JumpTarget);
if (NeedSandboxing)
_bundle_unlock();
} }
void TargetX8632::lowerCall(const InstCall *Instr) { void TargetX8632::lowerCall(const InstCall *Instr) {
...@@ -278,24 +278,29 @@ void TargetX8632::lowerCall(const InstCall *Instr) { ...@@ -278,24 +278,29 @@ void TargetX8632::lowerCall(const InstCall *Instr) {
break; break;
} }
} }
Operand *CallTarget = Operand *CallTarget =
legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm | Legal_AddrAbs); legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm | Legal_AddrAbs);
if (NeedSandboxing) {
if (llvm::isa<Constant>(CallTarget)) { Traits::Insts::Call *NewCall;
_bundle_lock(InstBundleLock::Opt_AlignToEnd); /* AutoBundle scoping */ {
} else { std::unique_ptr<AutoBundle> Bundle;
Variable *CallTargetVar = nullptr; if (NeedSandboxing) {
_mov(CallTargetVar, CallTarget); if (llvm::isa<Constant>(CallTarget)) {
_bundle_lock(InstBundleLock::Opt_AlignToEnd); Bundle = makeUnique<AutoBundle>(this, InstBundleLock::Opt_AlignToEnd);
const SizeT BundleSize = } else {
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes(); Variable *CallTargetVar = nullptr;
_and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1))); _mov(CallTargetVar, CallTarget);
CallTarget = CallTargetVar; Bundle = makeUnique<AutoBundle>(this, InstBundleLock::Opt_AlignToEnd);
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1)));
CallTarget = CallTargetVar;
}
} }
NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
} }
auto *NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
if (NeedSandboxing)
_bundle_unlock();
if (ReturnRegHi) if (ReturnRegHi)
Context.insert<InstFakeDef>(ReturnRegHi); Context.insert<InstFakeDef>(ReturnRegHi);
...@@ -749,8 +754,10 @@ void TargetX8632::addEpilog(CfgNode *Node) { ...@@ -749,8 +754,10 @@ void TargetX8632::addEpilog(CfgNode *Node) {
} }
} }
if (!NeedSandboxing) if (!NeedSandboxing) {
return; return;
}
// Change the original ret instruction into a sandboxed return sequence. // Change the original ret instruction into a sandboxed return sequence.
// t:ecx = pop // t:ecx = pop
// bundle_lock // bundle_lock
......
...@@ -165,12 +165,11 @@ void TargetX8664::_add_sp(Operand *Adjustment) { ...@@ -165,12 +165,11 @@ void TargetX8664::_add_sp(Operand *Adjustment) {
// add Adjustment, %esp // add Adjustment, %esp
// //
// instruction is not DCE'd. // instruction is not DCE'd.
_bundle_lock(); AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp)); _redefined(Context.insert<InstFakeDef>(esp, rsp));
_add(esp, Adjustment); _add(esp, Adjustment);
_redefined(Context.insert<InstFakeDef>(rsp, esp)); _redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rsp, r15); _add(rsp, r15);
_bundle_unlock();
} }
void TargetX8664::_mov_sp(Operand *NewValue) { void TargetX8664::_mov_sp(Operand *NewValue) {
...@@ -180,9 +179,7 @@ void TargetX8664::_mov_sp(Operand *NewValue) { ...@@ -180,9 +179,7 @@ void TargetX8664::_mov_sp(Operand *NewValue) {
Variable *rsp = Variable *rsp =
getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64); getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64);
if (NeedSandboxing) { AutoBundle _(this);
_bundle_lock();
}
_redefined(Context.insert<InstFakeDef>(esp, rsp)); _redefined(Context.insert<InstFakeDef>(esp, rsp));
_redefined(_mov(esp, NewValue)); _redefined(_mov(esp, NewValue));
...@@ -195,7 +192,6 @@ void TargetX8664::_mov_sp(Operand *NewValue) { ...@@ -195,7 +192,6 @@ void TargetX8664::_mov_sp(Operand *NewValue) {
Variable *r15 = Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64); getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_add(rsp, r15); _add(rsp, r15);
_bundle_unlock();
} }
void TargetX8664::_push_rbp() { void TargetX8664::_push_rbp() {
...@@ -218,10 +214,9 @@ void TargetX8664::_push_rbp() { ...@@ -218,10 +214,9 @@ void TargetX8664::_push_rbp() {
// .bundle_end // .bundle_end
// //
// to avoid leaking the upper 32-bits (i.e., the sandbox address.) // to avoid leaking the upper 32-bits (i.e., the sandbox address.)
_bundle_lock(); AutoBundle _(this);
_push(_0); _push(_0);
Context.insert<typename Traits::Insts::Store>(ebp, TopOfStack); Context.insert<typename Traits::Insts::Store>(ebp, TopOfStack);
_bundle_unlock();
} }
Traits::X86OperandMem *TargetX8664::_sandbox_mem_reference(X86OperandMem *Mem) { Traits::X86OperandMem *TargetX8664::_sandbox_mem_reference(X86OperandMem *Mem) {
...@@ -350,12 +345,11 @@ void TargetX8664::_sub_sp(Operand *Adjustment) { ...@@ -350,12 +345,11 @@ void TargetX8664::_sub_sp(Operand *Adjustment) {
// sub Adjustment, %esp // sub Adjustment, %esp
// add %r15, %rsp // add %r15, %rsp
// .bundle_end // .bundle_end
_bundle_lock(); AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp)); _redefined(Context.insert<InstFakeDef>(esp, rsp));
_sub(esp, Adjustment); _sub(esp, Adjustment);
_redefined(Context.insert<InstFakeDef>(rsp, esp)); _redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rsp, r15); _add(rsp, r15);
_bundle_unlock();
} }
void TargetX8664::initSandbox() { void TargetX8664::initSandbox() {
...@@ -369,6 +363,8 @@ void TargetX8664::initSandbox() { ...@@ -369,6 +363,8 @@ void TargetX8664::initSandbox() {
} }
void TargetX8664::lowerIndirectJump(Variable *JumpTarget) { void TargetX8664::lowerIndirectJump(Variable *JumpTarget) {
std::unique_ptr<AutoBundle> Bundler;
if (!NeedSandboxing) { if (!NeedSandboxing) {
Variable *T = makeReg(IceType_i64); Variable *T = makeReg(IceType_i64);
_movzx(T, JumpTarget); _movzx(T, JumpTarget);
...@@ -380,7 +376,7 @@ void TargetX8664::lowerIndirectJump(Variable *JumpTarget) { ...@@ -380,7 +376,7 @@ void TargetX8664::lowerIndirectJump(Variable *JumpTarget) {
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64); getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_mov(T, JumpTarget); _mov(T, JumpTarget);
_bundle_lock(); Bundler = makeUnique<AutoBundle>(this);
const SizeT BundleSize = const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes(); 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T, Ctx->getConstantInt32(~(BundleSize - 1))); _and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
...@@ -390,8 +386,6 @@ void TargetX8664::lowerIndirectJump(Variable *JumpTarget) { ...@@ -390,8 +386,6 @@ void TargetX8664::lowerIndirectJump(Variable *JumpTarget) {
} }
_jmp(JumpTarget); _jmp(JumpTarget);
if (NeedSandboxing)
_bundle_unlock();
} }
namespace { namespace {
...@@ -599,30 +593,32 @@ void TargetX8664::lowerCall(const InstCall *Instr) { ...@@ -599,30 +593,32 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
ReturnAddress = InstX86Label::create(Func, this); ReturnAddress = InstX86Label::create(Func, this);
ReturnAddress->setIsReturnLocation(true); ReturnAddress->setIsReturnLocation(true);
constexpr bool SuppressMangling = true; constexpr bool SuppressMangling = true;
if (CallTargetR == nullptr) { /* AutoBundle scoping */ {
_bundle_lock(InstBundleLock::Opt_PadToEnd); std::unique_ptr<AutoBundle> Bundler;
_push(Ctx->getConstantSym(0, ReturnAddress->getName(Func), if (CallTargetR == nullptr) {
SuppressMangling)); Bundler = makeUnique<AutoBundle>(this, InstBundleLock::Opt_PadToEnd);
} else { _push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
Variable *T = makeReg(IceType_i32); SuppressMangling));
Variable *T64 = makeReg(IceType_i64); } else {
Variable *r15 = Variable *T = makeReg(IceType_i32);
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64); Variable *T64 = makeReg(IceType_i64);
Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_mov(T, CallTargetR);
Bundler = makeUnique<AutoBundle>(this, InstBundleLock::Opt_PadToEnd);
_push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
SuppressMangling));
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
_movzx(T64, T);
_add(T64, r15);
CallTarget = T64;
}
_mov(T, CallTargetR); NewCall = Context.insert<Traits::Insts::Jmp>(CallTarget);
_bundle_lock(InstBundleLock::Opt_PadToEnd);
_push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
SuppressMangling));
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
_movzx(T64, T);
_add(T64, r15);
CallTarget = T64;
} }
NewCall = Context.insert<Traits::Insts::Jmp>(CallTarget);
_bundle_unlock();
if (ReturnReg != nullptr) { if (ReturnReg != nullptr) {
Context.insert<InstFakeDef>(ReturnReg); Context.insert<InstFakeDef>(ReturnReg);
} }
...@@ -858,13 +854,12 @@ void TargetX8664::addProlog(CfgNode *Node) { ...@@ -858,13 +854,12 @@ void TargetX8664::addProlog(CfgNode *Node) {
} else { } else {
_push_rbp(); _push_rbp();
_bundle_lock(); AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(ebp, rbp)); _redefined(Context.insert<InstFakeDef>(ebp, rbp));
_redefined(Context.insert<InstFakeDef>(esp, rsp)); _redefined(Context.insert<InstFakeDef>(esp, rsp));
_mov(ebp, esp); _mov(ebp, esp);
_redefined(Context.insert<InstFakeDef>(rsp, esp)); _redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rbp, r15); _add(rbp, r15);
_bundle_unlock();
} }
// Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode). // Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode).
Context.insert<InstFakeUse>(rbp); Context.insert<InstFakeUse>(rbp);
...@@ -1057,12 +1052,11 @@ void TargetX8664::addEpilog(CfgNode *Node) { ...@@ -1057,12 +1052,11 @@ void TargetX8664::addEpilog(CfgNode *Node) {
_pop(rcx); _pop(rcx);
Context.insert<InstFakeDef>(ecx, rcx); Context.insert<InstFakeDef>(ecx, rcx);
_bundle_lock(); AutoBundle _(this);
_mov(ebp, ecx); _mov(ebp, ecx);
_redefined(Context.insert<InstFakeDef>(rbp, ebp)); _redefined(Context.insert<InstFakeDef>(rbp, ebp));
_add(rbp, r15); _add(rbp, r15);
_bundle_unlock();
} }
} }
...@@ -1097,15 +1091,16 @@ void TargetX8664::addEpilog(CfgNode *Node) { ...@@ -1097,15 +1091,16 @@ void TargetX8664::addEpilog(CfgNode *Node) {
Variable *r15 = Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64); getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_bundle_lock(); /* AutoBundle scoping */ {
const SizeT BundleSize = 1 AutoBundle _(this);
<< Func->getAssembler<>()->getBundleAlignLog2Bytes(); const SizeT BundleSize =
_and(T_ecx, Ctx->getConstantInt32(~(BundleSize - 1))); 1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
Context.insert<InstFakeDef>(T_rcx, T_ecx); _and(T_ecx, Ctx->getConstantInt32(~(BundleSize - 1)));
_add(T_rcx, r15); Context.insert<InstFakeDef>(T_rcx, T_ecx);
_add(T_rcx, r15);
_jmp(T_rcx); _jmp(T_rcx);
_bundle_unlock(); }
if (RI->getSrcSize()) { if (RI->getSrcSize()) {
auto *RetValue = llvm::cast<Variable>(RI->getSrc(0)); auto *RetValue = llvm::cast<Variable>(RI->getSrc(0));
......
...@@ -382,6 +382,7 @@ protected: ...@@ -382,6 +382,7 @@ protected:
X86OperandMem **findMemoryReference() { return nullptr; } X86OperandMem **findMemoryReference() { return nullptr; }
public: public:
std::unique_ptr<AutoBundle> Bundler;
X86OperandMem **const MemOperand; X86OperandMem **const MemOperand;
template <typename... T> template <typename... T>
...@@ -392,16 +393,12 @@ protected: ...@@ -392,16 +393,12 @@ protected:
? nullptr ? nullptr
: findMemoryReference(Args...)) { : findMemoryReference(Args...)) {
if (MemOperand != nullptr) { if (MemOperand != nullptr) {
Target->_bundle_lock(BundleLockOpt); Bundler = makeUnique<AutoBundle>(Target, BundleLockOpt);
*MemOperand = Target->_sandbox_mem_reference(*MemOperand); *MemOperand = Target->_sandbox_mem_reference(*MemOperand);
} }
} }
~AutoMemorySandboxer() { ~AutoMemorySandboxer() {}
if (MemOperand != nullptr) {
Target->_bundle_unlock();
}
}
}; };
/// The following are helpers that insert lowered x86 instructions with /// The following are helpers that insert lowered x86 instructions with
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment