Commit 1d937a8e by John Porto

Subzero. Introduces a new LoweringContext::insert() method.

Emitting an instruction in Subzero requires a fair amount of boilerplate code: Context.insert(<InstType>::create(Func, <Args>...)); The ordeal is worse if one needs access to the recently created instruction: auto *Instr = <InstType>::create(Func, <Args>...); Context.insert(Instr); Instr->... This CL introduces a new LoweringContext::insert() method: template <<InstType>, <Args>...> <InstType> *LoweringContext::insert(<Args>...) { auto *New = <InstType>::create(Node.Cfg, <Args>...); insert(New); return New; } This is essentially syntactic sugar that allows instructions to be emitted by using Context.insert<InstType>(<Args>...); The compiler should be able to inline the calls (and get rid of the return value) when appropriate. make bloat reveals a small increase in translator code size BUG= R=sehr@chromium.org, stichnot@chromium.org Review URL: https://codereview.chromium.org/1527143003 .
parent 751e27ec
...@@ -23,11 +23,14 @@ ...@@ -23,11 +23,14 @@
#ifndef SUBZERO_SRC_ICETARGETLOWERING_H #ifndef SUBZERO_SRC_ICETARGETLOWERING_H
#define SUBZERO_SRC_ICETARGETLOWERING_H #define SUBZERO_SRC_ICETARGETLOWERING_H
#include "IceCfgNode.h"
#include "IceDefs.h" #include "IceDefs.h"
#include "IceInst.h" // for the names of the Inst subtypes #include "IceInst.h" // for the names of the Inst subtypes
#include "IceOperand.h" #include "IceOperand.h"
#include "IceTypes.h" #include "IceTypes.h"
#include <utility>
namespace Ice { namespace Ice {
// UnimplementedError is defined as a macro so that we can get actual line // UnimplementedError is defined as a macro so that we can get actual line
...@@ -72,6 +75,11 @@ public: ...@@ -72,6 +75,11 @@ public:
InstList::iterator getNext() const { return Next; } InstList::iterator getNext() const { return Next; }
InstList::iterator getEnd() const { return End; } InstList::iterator getEnd() const { return End; }
void insert(Inst *Inst); void insert(Inst *Inst);
/// Creates an instruction of type InstT in the current function, inserts it at
/// the current lowering point, and returns the newly created instruction so
/// callers can continue configuring it (e.g., setDestRedefined()).
///
/// This is syntactic sugar over the non-template insert(Inst *) overload:
///   Context.insert&lt;InstType&gt;(CtorArgs...);
/// replaces the boilerplate
///   Context.insert(InstType::create(Func, CtorArgs...));
template <typename InstT, typename... CtorArgs>
InstT *insert(CtorArgs &&... Params) {
  // Forward the constructor arguments untouched to the per-instruction
  // factory; the Cfg comes from the node currently being lowered.
  auto *Created = InstT::create(Node->getCfg(), std::forward<CtorArgs>(Params)...);
  insert(Created);
  return Created;
}
Inst *getLastInserted() const; Inst *getLastInserted() const;
void advanceCur() { Cur = Next; } void advanceCur() { Cur = Next; }
void advanceNext() { advanceForward(Next); } void advanceNext() { advanceForward(Next); }
...@@ -370,9 +378,9 @@ protected: ...@@ -370,9 +378,9 @@ protected:
void void
_bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) { _bundle_lock(InstBundleLock::Option BundleOption = InstBundleLock::Opt_None) {
Context.insert(InstBundleLock::create(Func, BundleOption)); Context.insert<InstBundleLock>(BundleOption);
} }
void _bundle_unlock() { Context.insert(InstBundleUnlock::create(Func)); } void _bundle_unlock() { Context.insert<InstBundleUnlock>(); }
void _set_dest_redefined() { Context.getLastInserted()->setDestRedefined(); } void _set_dest_redefined() { Context.getLastInserted()->setDestRedefined(); }
bool shouldOptimizeMemIntrins(); bool shouldOptimizeMemIntrins();
......
...@@ -388,7 +388,7 @@ void TargetMIPS32::lowerArguments() { ...@@ -388,7 +388,7 @@ void TargetMIPS32::lowerArguments() {
RegisterArg64On32->getHi()->setRegNum(RegHi); RegisterArg64On32->getHi()->setRegNum(RegHi);
Arg->setIsArg(false); Arg->setIsArg(false);
Args[I] = RegisterArg64On32; Args[I] = RegisterArg64On32;
Context.insert(InstAssign::create(Func, Arg, RegisterArg)); Context.insert<InstAssign>(Arg, RegisterArg);
continue; continue;
} else { } else {
assert(Ty == IceType_i32); assert(Ty == IceType_i32);
...@@ -404,7 +404,7 @@ void TargetMIPS32::lowerArguments() { ...@@ -404,7 +404,7 @@ void TargetMIPS32::lowerArguments() {
RegisterArg->setIsArg(); RegisterArg->setIsArg();
Arg->setIsArg(false); Arg->setIsArg(false);
Args[I] = RegisterArg; Args[I] = RegisterArg;
Context.insert(InstAssign::create(Func, Arg, RegisterArg)); Context.insert<InstAssign>(Arg, RegisterArg);
} }
} }
} }
...@@ -533,13 +533,13 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) { ...@@ -533,13 +533,13 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) {
// TODO(reed kotler): fakedef needed for now until all cases are implemented // TODO(reed kotler): fakedef needed for now until all cases are implemented
auto *DestLo = llvm::cast<Variable>(loOperand(Dest)); auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
auto *DestHi = llvm::cast<Variable>(hiOperand(Dest)); auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
Context.insert(InstFakeDef::create(Func, DestLo)); Context.insert<InstFakeDef>(DestLo);
Context.insert(InstFakeDef::create(Func, DestHi)); Context.insert<InstFakeDef>(DestHi);
UnimplementedError(Func->getContext()->getFlags()); UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Context.insert(InstFakeDef::create(Func, Dest)); Context.insert<InstFakeDef>(Dest);
UnimplementedError(Func->getContext()->getFlags()); UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
...@@ -602,9 +602,9 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) { ...@@ -602,9 +602,9 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) {
} }
// TODO(reed kotler): // TODO(reed kotler):
// fakedef and fakeuse needed for now until all cases are implemented // fakedef and fakeuse needed for now until all cases are implemented
Context.insert(InstFakeUse::create(Func, Src0R)); Context.insert<InstFakeUse>(Src0R);
Context.insert(InstFakeUse::create(Func, Src1R)); Context.insert<InstFakeUse>(Src1R);
Context.insert(InstFakeDef::create(Func, Dest)); Context.insert<InstFakeDef>(Dest);
UnimplementedError(Func->getContext()->getFlags()); UnimplementedError(Func->getContext()->getFlags());
} }
...@@ -888,7 +888,7 @@ void TargetMIPS32::lowerRet(const InstRet *Inst) { ...@@ -888,7 +888,7 @@ void TargetMIPS32::lowerRet(const InstRet *Inst) {
Variable *R0 = legalizeToReg(loOperand(Src0), RegMIPS32::Reg_V0); Variable *R0 = legalizeToReg(loOperand(Src0), RegMIPS32::Reg_V0);
Variable *R1 = legalizeToReg(hiOperand(Src0), RegMIPS32::Reg_V1); Variable *R1 = legalizeToReg(hiOperand(Src0), RegMIPS32::Reg_V1);
Reg = R0; Reg = R0;
Context.insert(InstFakeUse::create(Func, R1)); Context.insert<InstFakeUse>(R1);
break; break;
} }
...@@ -1022,7 +1022,7 @@ Operand *TargetMIPS32::legalize(Operand *From, LegalMask Allowed, ...@@ -1022,7 +1022,7 @@ Operand *TargetMIPS32::legalize(Operand *From, LegalMask Allowed,
(void)C; (void)C;
// TODO(reed kotler): complete this case for proper implementation // TODO(reed kotler): complete this case for proper implementation
Variable *Reg = makeReg(Ty, RegNum); Variable *Reg = makeReg(Ty, RegNum);
Context.insert(InstFakeDef::create(Func, Reg)); Context.insert<InstFakeDef>(Reg);
return Reg; return Reg;
} else if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) { } else if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
uint32_t Value = static_cast<uint32_t>(C32->getValue()); uint32_t Value = static_cast<uint32_t>(C32->getValue());
......
...@@ -116,60 +116,59 @@ public: ...@@ -116,60 +116,59 @@ public:
// minimal syntactic overhead, so that the lowering code can look as close to // minimal syntactic overhead, so that the lowering code can look as close to
// assembly as practical. // assembly as practical.
void _add(Variable *Dest, Variable *Src0, Variable *Src1) { void _add(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstMIPS32Add::create(Func, Dest, Src0, Src1)); Context.insert<InstMIPS32Add>(Dest, Src0, Src1);
} }
void _and(Variable *Dest, Variable *Src0, Variable *Src1) { void _and(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstMIPS32And::create(Func, Dest, Src0, Src1)); Context.insert<InstMIPS32And>(Dest, Src0, Src1);
} }
void _ret(Variable *RA, Variable *Src0 = nullptr) { void _ret(Variable *RA, Variable *Src0 = nullptr) {
Context.insert(InstMIPS32Ret::create(Func, RA, Src0)); Context.insert<InstMIPS32Ret>(RA, Src0);
} }
void _addiu(Variable *Dest, Variable *Src, uint32_t Imm) { void _addiu(Variable *Dest, Variable *Src, uint32_t Imm) {
Context.insert(InstMIPS32Addiu::create(Func, Dest, Src, Imm)); Context.insert<InstMIPS32Addiu>(Dest, Src, Imm);
} }
void _lui(Variable *Dest, uint32_t Imm) { void _lui(Variable *Dest, uint32_t Imm) {
Context.insert(InstMIPS32Lui::create(Func, Dest, Imm)); Context.insert<InstMIPS32Lui>(Dest, Imm);
} }
void _mov(Variable *Dest, Operand *Src0) { void _mov(Variable *Dest, Operand *Src0) {
assert(Dest != nullptr); assert(Dest != nullptr);
// Variable* Src0_ = llvm::dyn_cast<Variable>(Src0); // Variable* Src0_ = llvm::dyn_cast<Variable>(Src0);
if (llvm::isa<ConstantRelocatable>(Src0)) { if (llvm::isa<ConstantRelocatable>(Src0)) {
Context.insert(InstMIPS32La::create(Func, Dest, Src0)); Context.insert<InstMIPS32La>(Dest, Src0);
} else { } else {
auto *Instr = InstMIPS32Mov::create(Func, Dest, Src0); auto *Instr = Context.insert<InstMIPS32Mov>(Dest, Src0);
Context.insert(Instr);
if (Instr->isMultiDest()) { if (Instr->isMultiDest()) {
// If Instr is multi-dest, then Dest must be a Variable64On32. We add a // If Instr is multi-dest, then Dest must be a Variable64On32. We add a
// fake-def for Instr.DestHi here. // fake-def for Instr.DestHi here.
assert(llvm::isa<Variable64On32>(Dest)); assert(llvm::isa<Variable64On32>(Dest));
Context.insert(InstFakeDef::create(Func, Instr->getDestHi())); Context.insert<InstFakeDef>(Instr->getDestHi());
} }
} }
} }
void _mul(Variable *Dest, Variable *Src0, Variable *Src1) { void _mul(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstMIPS32Mul::create(Func, Dest, Src0, Src1)); Context.insert<InstMIPS32Mul>(Dest, Src0, Src1);
} }
void _or(Variable *Dest, Variable *Src0, Variable *Src1) { void _or(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstMIPS32Or::create(Func, Dest, Src0, Src1)); Context.insert<InstMIPS32Or>(Dest, Src0, Src1);
} }
void _ori(Variable *Dest, Variable *Src, uint32_t Imm) { void _ori(Variable *Dest, Variable *Src, uint32_t Imm) {
Context.insert(InstMIPS32Ori::create(Func, Dest, Src, Imm)); Context.insert<InstMIPS32Ori>(Dest, Src, Imm);
} }
void _sub(Variable *Dest, Variable *Src0, Variable *Src1) { void _sub(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstMIPS32Sub::create(Func, Dest, Src0, Src1)); Context.insert<InstMIPS32Sub>(Dest, Src0, Src1);
} }
void _xor(Variable *Dest, Variable *Src0, Variable *Src1) { void _xor(Variable *Dest, Variable *Src0, Variable *Src1) {
Context.insert(InstMIPS32Xor::create(Func, Dest, Src0, Src1)); Context.insert<InstMIPS32Xor>(Dest, Src0, Src1);
} }
void lowerArguments() override; void lowerArguments() override;
......
...@@ -190,7 +190,7 @@ void TargetX8632::lowerCall(const InstCall *Instr) { ...@@ -190,7 +190,7 @@ void TargetX8632::lowerCall(const InstCall *Instr) {
// Generate a FakeUse of register arguments so that they do not get dead // Generate a FakeUse of register arguments so that they do not get dead
// code eliminated as a result of the FakeKill of scratch registers after // code eliminated as a result of the FakeKill of scratch registers after
// the call. // the call.
Context.insert(InstFakeUse::create(Func, Reg)); Context.insert<InstFakeUse>(Reg);
} }
// Generate the call instruction. Assign its result to a temporary with high // Generate the call instruction. Assign its result to a temporary with high
// register allocation weight. // register allocation weight.
...@@ -244,15 +244,14 @@ void TargetX8632::lowerCall(const InstCall *Instr) { ...@@ -244,15 +244,14 @@ void TargetX8632::lowerCall(const InstCall *Instr) {
CallTarget = CallTargetVar; CallTarget = CallTargetVar;
} }
} }
Inst *NewCall = Traits::Insts::Call::create(Func, ReturnReg, CallTarget); auto *NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
Context.insert(NewCall);
if (NeedSandboxing) if (NeedSandboxing)
_bundle_unlock(); _bundle_unlock();
if (ReturnRegHi) if (ReturnRegHi)
Context.insert(InstFakeDef::create(Func, ReturnRegHi)); Context.insert<InstFakeDef>(ReturnRegHi);
// Insert a register-kill pseudo instruction. // Insert a register-kill pseudo instruction.
Context.insert(InstFakeKill::create(Func, NewCall)); Context.insert<InstFakeKill>(NewCall);
if (Dest != nullptr && isScalarFloatingType(Dest->getType())) { if (Dest != nullptr && isScalarFloatingType(Dest->getType())) {
// Special treatment for an FP function which returns its result in st(0). // Special treatment for an FP function which returns its result in st(0).
...@@ -262,13 +261,12 @@ void TargetX8632::lowerCall(const InstCall *Instr) { ...@@ -262,13 +261,12 @@ void TargetX8632::lowerCall(const InstCall *Instr) {
_fstp(Dest); _fstp(Dest);
// Create a fake use of Dest in case it actually isn't used, because st(0) // Create a fake use of Dest in case it actually isn't used, because st(0)
// still needs to be popped. // still needs to be popped.
Context.insert(InstFakeUse::create(Func, Dest)); Context.insert<InstFakeUse>(Dest);
} }
// Generate a FakeUse to keep the call live if necessary. // Generate a FakeUse to keep the call live if necessary.
if (Instr->hasSideEffects() && ReturnReg) { if (Instr->hasSideEffects() && ReturnReg) {
Inst *FakeUse = InstFakeUse::create(Func, ReturnReg); Context.insert<InstFakeUse>(ReturnReg);
Context.insert(FakeUse);
} }
if (!Dest) if (!Dest)
...@@ -324,7 +322,7 @@ void TargetX8632::lowerArguments() { ...@@ -324,7 +322,7 @@ void TargetX8632::lowerArguments() {
Arg->setIsArg(false); Arg->setIsArg(false);
Args[I] = RegisterArg; Args[I] = RegisterArg;
Context.insert(InstAssign::create(Func, Arg, RegisterArg)); Context.insert<InstAssign>(Arg, RegisterArg);
} }
} }
...@@ -339,7 +337,7 @@ void TargetX8632::lowerRet(const InstRet *Inst) { ...@@ -339,7 +337,7 @@ void TargetX8632::lowerRet(const InstRet *Inst) {
Variable *edx = Variable *edx =
legalizeToReg(hiOperand(Src0), Traits::RegisterSet::Reg_edx); legalizeToReg(hiOperand(Src0), Traits::RegisterSet::Reg_edx);
Reg = eax; Reg = eax;
Context.insert(InstFakeUse::create(Func, edx)); Context.insert<InstFakeUse>(edx);
} else if (isScalarFloatingType(Src0->getType())) { } else if (isScalarFloatingType(Src0->getType())) {
_fld(Src0); _fld(Src0);
} else if (isVectorType(Src0->getType())) { } else if (isVectorType(Src0->getType())) {
...@@ -469,7 +467,7 @@ void TargetX8632::addProlog(CfgNode *Node) { ...@@ -469,7 +467,7 @@ void TargetX8632::addProlog(CfgNode *Node) {
_push(ebp); _push(ebp);
_mov(ebp, esp); _mov(ebp, esp);
// Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode). // Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode).
Context.insert(InstFakeUse::create(Func, ebp)); Context.insert<InstFakeUse>(ebp);
} }
// Align the variables area. SpillAreaPaddingBytes is the size of the region // Align the variables area. SpillAreaPaddingBytes is the size of the region
...@@ -633,7 +631,7 @@ void TargetX8632::addEpilog(CfgNode *Node) { ...@@ -633,7 +631,7 @@ void TargetX8632::addEpilog(CfgNode *Node) {
// For late-stage liveness analysis (e.g. asm-verbose mode), adding a fake // For late-stage liveness analysis (e.g. asm-verbose mode), adding a fake
// use of esp before the assignment of esp=ebp keeps previous esp // use of esp before the assignment of esp=ebp keeps previous esp
// adjustments from being dead-code eliminated. // adjustments from being dead-code eliminated.
Context.insert(InstFakeUse::create(Func, esp)); Context.insert<InstFakeUse>(esp);
_mov(esp, ebp); _mov(esp, ebp);
_pop(ebp); _pop(ebp);
} else { } else {
...@@ -676,7 +674,7 @@ void TargetX8632::addEpilog(CfgNode *Node) { ...@@ -676,7 +674,7 @@ void TargetX8632::addEpilog(CfgNode *Node) {
lowerIndirectJump(T_ecx); lowerIndirectJump(T_ecx);
if (RI->getSrcSize()) { if (RI->getSrcSize()) {
auto *RetValue = llvm::cast<Variable>(RI->getSrc(0)); auto *RetValue = llvm::cast<Variable>(RI->getSrc(0));
Context.insert(InstFakeUse::create(Func, RetValue)); Context.insert<InstFakeUse>(RetValue);
} }
RI->setDeleted(); RI->setDeleted();
} }
......
...@@ -226,12 +226,12 @@ void TargetX8664::lowerCall(const InstCall *Instr) { ...@@ -226,12 +226,12 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
// Generate a FakeUse of register arguments so that they do not get dead // Generate a FakeUse of register arguments so that they do not get dead
// code eliminated as a result of the FakeKill of scratch registers after // code eliminated as a result of the FakeKill of scratch registers after
// the call. // the call.
Context.insert(InstFakeUse::create(Func, Reg)); Context.insert<InstFakeUse>(Reg);
} }
for (SizeT i = 0, NumGprArgs = GprArgs.size(); i < NumGprArgs; ++i) { for (SizeT i = 0, NumGprArgs = GprArgs.size(); i < NumGprArgs; ++i) {
Variable *Reg = legalizeToReg(GprArgs[i], getRegisterForGprArgNum(i)); Variable *Reg = legalizeToReg(GprArgs[i], getRegisterForGprArgNum(i));
Context.insert(InstFakeUse::create(Func, Reg)); Context.insert<InstFakeUse>(Reg);
} }
// Generate the call instruction. Assign its result to a temporary with high // Generate the call instruction. Assign its result to a temporary with high
...@@ -271,8 +271,7 @@ void TargetX8664::lowerCall(const InstCall *Instr) { ...@@ -271,8 +271,7 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
if (NeedSandboxing) { if (NeedSandboxing) {
llvm_unreachable("X86-64 Sandboxing codegen not implemented."); llvm_unreachable("X86-64 Sandboxing codegen not implemented.");
} }
Inst *NewCall = Traits::Insts::Call::create(Func, ReturnReg, CallTarget); auto *NewCall = Context.insert<Traits::Insts::Call>(ReturnReg, CallTarget);
Context.insert(NewCall);
if (NeedSandboxing) { if (NeedSandboxing) {
llvm_unreachable("X86-64 Sandboxing codegen not implemented."); llvm_unreachable("X86-64 Sandboxing codegen not implemented.");
} }
...@@ -286,12 +285,11 @@ void TargetX8664::lowerCall(const InstCall *Instr) { ...@@ -286,12 +285,11 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
} }
// Insert a register-kill pseudo instruction. // Insert a register-kill pseudo instruction.
Context.insert(InstFakeKill::create(Func, NewCall)); Context.insert<InstFakeKill>(NewCall);
// Generate a FakeUse to keep the call live if necessary. // Generate a FakeUse to keep the call live if necessary.
if (Instr->hasSideEffects() && ReturnReg) { if (Instr->hasSideEffects() && ReturnReg) {
Inst *FakeUse = InstFakeUse::create(Func, ReturnReg); Context.insert<InstFakeUse>(ReturnReg);
Context.insert(FakeUse);
} }
if (!Dest) if (!Dest)
...@@ -356,7 +354,7 @@ void TargetX8664::lowerArguments() { ...@@ -356,7 +354,7 @@ void TargetX8664::lowerArguments() {
Arg->setIsArg(false); Arg->setIsArg(false);
Args[i] = RegisterArg; Args[i] = RegisterArg;
Context.insert(InstAssign::create(Func, Arg, RegisterArg)); Context.insert<InstAssign>(Arg, RegisterArg);
} }
} }
...@@ -486,7 +484,7 @@ void TargetX8664::addProlog(CfgNode *Node) { ...@@ -486,7 +484,7 @@ void TargetX8664::addProlog(CfgNode *Node) {
_push(ebp); _push(ebp);
_mov(ebp, esp); _mov(ebp, esp);
// Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode). // Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode).
Context.insert(InstFakeUse::create(Func, ebp)); Context.insert<InstFakeUse>(ebp);
} }
// Align the variables area. SpillAreaPaddingBytes is the size of the region // Align the variables area. SpillAreaPaddingBytes is the size of the region
...@@ -645,7 +643,7 @@ void TargetX8664::addEpilog(CfgNode *Node) { ...@@ -645,7 +643,7 @@ void TargetX8664::addEpilog(CfgNode *Node) {
// For late-stage liveness analysis (e.g. asm-verbose mode), adding a fake // For late-stage liveness analysis (e.g. asm-verbose mode), adding a fake
// use of esp before the assignment of esp=ebp keeps previous esp // use of esp before the assignment of esp=ebp keeps previous esp
// adjustments from being dead-code eliminated. // adjustments from being dead-code eliminated.
Context.insert(InstFakeUse::create(Func, esp)); Context.insert<InstFakeUse>(esp);
_mov(esp, ebp); _mov(esp, ebp);
_pop(ebp); _pop(ebp);
} else { } else {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment