Commit 3bf335f6 by John Porto

Subzero. RAII NaCl Bundling.

This CL introduces the TargetLowering::AutoBundle type, which allows RAII-style bundle emission. As part of the CL, all uses of TargetLowering::_bundle_lock() and TargetLowering::_bundle_unlock() were replaced with uses of the newly introduced type.

BUG=
R=sehr@chromium.org, stichnot@chromium.org
Review URL: https://codereview.chromium.org/1585843007 .
parent 5403f5dc
......@@ -248,6 +248,25 @@ void TargetLowering::staticInit(GlobalContext *Ctx) {
// Constructs the lowering state for Func, caching its GlobalContext and
// default-initializing the per-node lowering Context.
TargetLowering::TargetLowering(Cfg *Func)
: Func(Func), Ctx(Func->getContext()), Context() {}
// Opens a bundle scope (RAII): marks Target as being inside a bundle and,
// only when sandboxed code generation was requested, emits a bundle_lock
// instruction with the given Option.
TargetLowering::AutoBundle::AutoBundle(TargetLowering *Target,
InstBundleLock::Option Option)
: Target(Target),
NeedSandboxing(Target->Ctx->getFlags().getUseSandboxing()) {
// Nested AutoBundles are disallowed; assert before setting the flag so the
// offending (outer-still-active) state is visible in debug builds.
assert(!Target->AutoBundling);
Target->AutoBundling = true;
if (NeedSandboxing) {
Target->_bundle_lock(Option);
}
}
// Closes the bundle scope opened by the constructor: clears the in-bundle
// flag and, iff the constructor emitted a bundle_lock (NeedSandboxing was
// cached at construction), emits the matching bundle_unlock.
TargetLowering::AutoBundle::~AutoBundle() {
assert(Target->AutoBundling);
Target->AutoBundling = false;
if (NeedSandboxing) {
Target->_bundle_unlock();
}
}
void TargetLowering::genTargetHelperCalls() {
for (CfgNode *Node : Func->getNodes()) {
Context.init(Node);
......
......@@ -297,7 +297,43 @@ public:
virtual ~TargetLowering() = default;
private:
// This control variable is used by AutoBundle (RAII-style bundle
// locking/unlocking) to prevent nested bundles.
bool AutoBundling = false;
// _bundle_lock() and _bundle_unlock() were made private to force subtargets
// to use the AutoBundle helper.
// Inserts an InstBundleLock pseudo-instruction into the current lowering
// context. Private so sub-targets must go through the AutoBundle RAII helper.
void
_bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) {
Context.insert<InstBundleLock>(BundleOption);
}
void _bundle_unlock() { Context.insert<InstBundleUnlock>(); }
protected:
/// AutoBundle provides RAII-style bundling. Sub-targets are expected to use
/// it when emitting NaCl bundles to ensure proper bundle unlocking and to
/// prevent nested bundles.
///
/// AutoBundle objects emit a _bundle_lock() during construction (but only
/// if sandboxed code generation was requested) and a _bundle_unlock() during
/// destruction. By carefully scoping objects of this type, sub-targets can
/// ensure proper bundle emission.
class AutoBundle {
// Not default-constructible, copyable, or assignable: each AutoBundle is
// bound to exactly one TargetLowering for exactly one scoped bundle.
AutoBundle() = delete;
AutoBundle(const AutoBundle &) = delete;
AutoBundle &operator=(const AutoBundle &) = delete;
public:
explicit AutoBundle(TargetLowering *Target, InstBundleLock::Option Option =
InstBundleLock::Opt_None);
~AutoBundle();
private:
TargetLowering *const Target;
// Sandboxing flag cached at construction time so the destructor emits a
// bundle_unlock if and only if the constructor emitted a bundle_lock.
const bool NeedSandboxing;
};
explicit TargetLowering(Cfg *Func);
// Applies command line filters to TypeToRegisterSet array.
static void
......@@ -394,11 +430,6 @@ protected:
InstCall *makeHelperCall(const IceString &Name, Variable *Dest,
SizeT MaxSrcs);
void
_bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) {
Context.insert<InstBundleLock>(BundleOption);
}
void _bundle_unlock() { Context.insert<InstBundleUnlock>(); }
void _set_dest_redefined() { Context.getLastInserted()->setDestRedefined(); }
bool shouldOptimizeMemIntrins();
......
......@@ -1771,7 +1771,6 @@ void TargetARM32::postLowerLegalization() {
CurInstr->setDeleted();
}
} else if (auto *StrInstr = llvm::dyn_cast<InstARM32Str>(CurInstr)) {
Sandboxer Bundle(this);
if (OperandARM32Mem *LegalMem = Legalizer.legalizeMemOperand(
llvm::cast<OperandARM32Mem>(StrInstr->getSrc(1)))) {
Sandboxer(this).str(llvm::cast<Variable>(CurInstr->getSrc(0)),
......@@ -6193,17 +6192,9 @@ void TargetARM32::ComputationTracker::recordProducers(CfgNode *Node) {
// Sandboxer delegates bundle bracketing to its AutoBundle member (Bundler):
// the member's constructor opens the bundle (emitting bundle_lock only when
// sandboxing is enabled) and its destructor closes it, so the Sandboxer
// bodies themselves have nothing to do.
//
// NOTE(review): the scraped diff lost its +/- markers, leaving the
// constructor with two bodies and the destructor defined twice; this keeps
// only the post-commit definitions.
TargetARM32::Sandboxer::Sandboxer(TargetARM32 *Target,
InstBundleLock::Option BundleOption)
: Bundler(Target, BundleOption), Target(Target) {}

TargetARM32::Sandboxer::~Sandboxer() {}
namespace {
OperandARM32FlexImm *indirectBranchBicMask(Cfg *Func) {
......
......@@ -942,6 +942,7 @@ protected:
void sub_sp(Operand *SubAmount);
private:
AutoBundle Bundler;
TargetARM32 *Target;
};
......
......@@ -146,15 +146,15 @@ void TargetX8632::_sub_sp(Operand *Adjustment) {
}
// Lowers an indirect jump. The whole sequence is scoped by an AutoBundle,
// which emits bundle_lock/bundle_unlock only when sandboxing is enabled;
// under sandboxing the target is additionally masked so the jump can only
// land on a bundle boundary.
//
// NOTE(review): the markerless diff interleaved the removed explicit
// _bundle_lock()/_bundle_unlock() calls with the new AutoBundle scope,
// which would double-lock (and call now-private methods); this restores the
// post-commit body.
void TargetX8632::lowerIndirectJump(Variable *JumpTarget) {
AutoBundle _(this);
if (NeedSandboxing) {
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
// Clear the low bits so JumpTarget is aligned to a bundle boundary.
_and(JumpTarget, Ctx->getConstantInt32(~(BundleSize - 1)));
}
_jmp(JumpTarget);
}
void TargetX8632::lowerCall(const InstCall *Instr) {
......@@ -278,24 +278,29 @@ void TargetX8632::lowerCall(const InstCall *Instr) {
break;
}
}
Operand *CallTarget =
legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm | Legal_AddrAbs);
if (NeedSandboxing) {
if (llvm::isa<Constant>(CallTarget)) {
_bundle_lock(InstBundleLock::Opt_AlignToEnd);
} else {
Variable *CallTargetVar = nullptr;
_mov(CallTargetVar, CallTarget);
_bundle_lock(InstBundleLock::Opt_AlignToEnd);
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1)));
CallTarget = CallTargetVar;
Traits::Insts::Call *NewCall;
/* AutoBundle scoping */ {
std::unique_ptr<AutoBundle> Bundle;
if (NeedSandboxing) {
if (llvm::isa<Constant>(CallTarget)) {
Bundle = makeUnique<AutoBundle>(this, InstBundleLock::Opt_AlignToEnd);
} else {
Variable *CallTargetVar = nullptr;
_mov(CallTargetVar, CallTarget);
Bundle = makeUnique<AutoBundle>(this, InstBundleLock::Opt_AlignToEnd);
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(CallTargetVar, Ctx->getConstantInt32(~(BundleSize - 1)));
CallTarget = CallTargetVar;
}
}
NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
}
auto *NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
if (NeedSandboxing)
_bundle_unlock();
if (ReturnRegHi)
Context.insert<InstFakeDef>(ReturnRegHi);
......@@ -749,8 +754,10 @@ void TargetX8632::addEpilog(CfgNode *Node) {
}
}
if (!NeedSandboxing)
if (!NeedSandboxing) {
return;
}
// Change the original ret instruction into a sandboxed return sequence.
// t:ecx = pop
// bundle_lock
......
......@@ -165,12 +165,11 @@ void TargetX8664::_add_sp(Operand *Adjustment) {
// add Adjustment, %esp
//
// instruction is not DCE'd.
_bundle_lock();
AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_add(esp, Adjustment);
_redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rsp, r15);
_bundle_unlock();
}
void TargetX8664::_mov_sp(Operand *NewValue) {
......@@ -180,9 +179,7 @@ void TargetX8664::_mov_sp(Operand *NewValue) {
Variable *rsp =
getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64);
if (NeedSandboxing) {
_bundle_lock();
}
AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_redefined(_mov(esp, NewValue));
......@@ -195,7 +192,6 @@ void TargetX8664::_mov_sp(Operand *NewValue) {
Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_add(rsp, r15);
_bundle_unlock();
}
void TargetX8664::_push_rbp() {
......@@ -218,10 +214,9 @@ void TargetX8664::_push_rbp() {
// .bundle_end
//
// to avoid leaking the upper 32-bits (i.e., the sandbox address.)
_bundle_lock();
AutoBundle _(this);
_push(_0);
Context.insert<typename Traits::Insts::Store>(ebp, TopOfStack);
_bundle_unlock();
}
Traits::X86OperandMem *TargetX8664::_sandbox_mem_reference(X86OperandMem *Mem) {
......@@ -350,12 +345,11 @@ void TargetX8664::_sub_sp(Operand *Adjustment) {
// sub Adjustment, %esp
// add %r15, %rsp
// .bundle_end
_bundle_lock();
AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_sub(esp, Adjustment);
_redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rsp, r15);
_bundle_unlock();
}
void TargetX8664::initSandbox() {
......@@ -369,6 +363,8 @@ void TargetX8664::initSandbox() {
}
void TargetX8664::lowerIndirectJump(Variable *JumpTarget) {
std::unique_ptr<AutoBundle> Bundler;
if (!NeedSandboxing) {
Variable *T = makeReg(IceType_i64);
_movzx(T, JumpTarget);
......@@ -380,7 +376,7 @@ void TargetX8664::lowerIndirectJump(Variable *JumpTarget) {
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_mov(T, JumpTarget);
_bundle_lock();
Bundler = makeUnique<AutoBundle>(this);
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
......@@ -390,8 +386,6 @@ void TargetX8664::lowerIndirectJump(Variable *JumpTarget) {
}
_jmp(JumpTarget);
if (NeedSandboxing)
_bundle_unlock();
}
namespace {
......@@ -599,30 +593,32 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
ReturnAddress = InstX86Label::create(Func, this);
ReturnAddress->setIsReturnLocation(true);
constexpr bool SuppressMangling = true;
if (CallTargetR == nullptr) {
_bundle_lock(InstBundleLock::Opt_PadToEnd);
_push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
SuppressMangling));
} else {
Variable *T = makeReg(IceType_i32);
Variable *T64 = makeReg(IceType_i64);
Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
/* AutoBundle scoping */ {
std::unique_ptr<AutoBundle> Bundler;
if (CallTargetR == nullptr) {
Bundler = makeUnique<AutoBundle>(this, InstBundleLock::Opt_PadToEnd);
_push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
SuppressMangling));
} else {
Variable *T = makeReg(IceType_i32);
Variable *T64 = makeReg(IceType_i64);
Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_mov(T, CallTargetR);
Bundler = makeUnique<AutoBundle>(this, InstBundleLock::Opt_PadToEnd);
_push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
SuppressMangling));
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
_movzx(T64, T);
_add(T64, r15);
CallTarget = T64;
}
_mov(T, CallTargetR);
_bundle_lock(InstBundleLock::Opt_PadToEnd);
_push(Ctx->getConstantSym(0, ReturnAddress->getName(Func),
SuppressMangling));
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T, Ctx->getConstantInt32(~(BundleSize - 1)));
_movzx(T64, T);
_add(T64, r15);
CallTarget = T64;
NewCall = Context.insert<Traits::Insts::Jmp>(CallTarget);
}
NewCall = Context.insert<Traits::Insts::Jmp>(CallTarget);
_bundle_unlock();
if (ReturnReg != nullptr) {
Context.insert<InstFakeDef>(ReturnReg);
}
......@@ -858,13 +854,12 @@ void TargetX8664::addProlog(CfgNode *Node) {
} else {
_push_rbp();
_bundle_lock();
AutoBundle _(this);
_redefined(Context.insert<InstFakeDef>(ebp, rbp));
_redefined(Context.insert<InstFakeDef>(esp, rsp));
_mov(ebp, esp);
_redefined(Context.insert<InstFakeDef>(rsp, esp));
_add(rbp, r15);
_bundle_unlock();
}
// Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode).
Context.insert<InstFakeUse>(rbp);
......@@ -1057,12 +1052,11 @@ void TargetX8664::addEpilog(CfgNode *Node) {
_pop(rcx);
Context.insert<InstFakeDef>(ecx, rcx);
_bundle_lock();
AutoBundle _(this);
_mov(ebp, ecx);
_redefined(Context.insert<InstFakeDef>(rbp, ebp));
_add(rbp, r15);
_bundle_unlock();
}
}
......@@ -1097,15 +1091,16 @@ void TargetX8664::addEpilog(CfgNode *Node) {
Variable *r15 =
getPhysicalRegister(Traits::RegisterSet::Reg_r15, IceType_i64);
_bundle_lock();
const SizeT BundleSize = 1
<< Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T_ecx, Ctx->getConstantInt32(~(BundleSize - 1)));
Context.insert<InstFakeDef>(T_rcx, T_ecx);
_add(T_rcx, r15);
/* AutoBundle scoping */ {
AutoBundle _(this);
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(T_ecx, Ctx->getConstantInt32(~(BundleSize - 1)));
Context.insert<InstFakeDef>(T_rcx, T_ecx);
_add(T_rcx, r15);
_jmp(T_rcx);
_bundle_unlock();
_jmp(T_rcx);
}
if (RI->getSrcSize()) {
auto *RetValue = llvm::cast<Variable>(RI->getSrc(0));
......
......@@ -382,6 +382,7 @@ protected:
X86OperandMem **findMemoryReference() { return nullptr; }
public:
std::unique_ptr<AutoBundle> Bundler;
X86OperandMem **const MemOperand;
template <typename... T>
......@@ -392,16 +393,12 @@ protected:
? nullptr
: findMemoryReference(Args...)) {
if (MemOperand != nullptr) {
Target->_bundle_lock(BundleLockOpt);
Bundler = makeUnique<AutoBundle>(Target, BundleLockOpt);
*MemOperand = Target->_sandbox_mem_reference(*MemOperand);
}
}
// The AutoBundle member (Bundler) closes any bundle opened by the
// constructor, so the destructor no longer needs an explicit
// _bundle_unlock().
//
// NOTE(review): the markerless diff showed both the old manual-unlock
// destructor and the new empty one (an invalid duplicate definition); this
// keeps only the post-commit destructor.
~AutoMemorySandboxer() {}
};
/// The following are helpers that insert lowered x86 instructions with
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment