Commit 91c773e1 by Jim Stichnoth

Subzero: Improve the usability of UnimplementedError during lowering.

Provides a variant of the UnimplementedError macro specifically for use in incomplete target instruction lowering. When --skip-unimplemented is specified, the UnimplementedLoweringError macro adds FakeUse and FakeDef instructions in order to maintain consistency in liveness analysis.

BUG=none
R=kschimpf@google.com
Review URL: https://codereview.chromium.org/1591893002
parent b819665a
......@@ -459,6 +459,26 @@ void TargetLowering::markRedefinitions() {
}
}
// Inserts a FakeUse for every source variable of Instr, followed by a FakeDef
// of its Dest (if any), so that liveness analysis stays consistent when the
// real lowering of Instr is skipped. A Variable64On32 is modeled as separate
// fake uses/defs of its lo and hi halves. Uses are inserted before the def to
// mirror the ordering of a real instruction.
void TargetLowering::addFakeDefUses(const Inst *Instr) {
  FOREACH_VAR_IN_INST(Var, *Instr) {
    auto *SplitVar = llvm::dyn_cast<Variable64On32>(Var);
    if (SplitVar == nullptr) {
      Context.insert<InstFakeUse>(Var);
    } else {
      Context.insert<InstFakeUse>(SplitVar->getLo());
      Context.insert<InstFakeUse>(SplitVar->getHi());
    }
  }
  Variable *Dest = Instr->getDest();
  if (Dest == nullptr)
    return;
  if (auto *SplitDest = llvm::dyn_cast<Variable64On32>(Dest)) {
    Context.insert<InstFakeDef>(SplitDest->getLo());
    Context.insert<InstFakeDef>(SplitDest->getHi());
  } else {
    Context.insert<InstFakeDef>(Dest);
  }
}
void TargetLowering::sortVarsByAlignment(VarList &Dest,
const VarList &Source) const {
Dest = Source;
......
......@@ -45,6 +45,21 @@ namespace Ice {
} \
} while (0)
// UnimplementedLoweringError is similar in style to UnimplementedError. Given
// a TargetLowering object pointer and an Inst pointer, it adds appropriate
// FakeDef and FakeUse instructions to try maintain liveness consistency.
#define UnimplementedLoweringError(Target, Instr) \
do { \
if ((Target)->Ctx->getFlags().getSkipUnimplemented()) { \
(Target)->addFakeDefUses(Instr); \
} else { \
/* Use llvm_unreachable instead of report_fatal_error, which gives \
better stack traces. */ \
llvm_unreachable("Not yet implemented"); \
abort(); \
} \
} while (0)
/// LoweringContext makes it easy to iterate through non-deleted instructions in
/// a node, and insert new (lowered) instructions at the current point. Along
/// with the instruction list container and associated iterators, it holds the
......@@ -373,6 +388,12 @@ protected:
/// before returning.
virtual void postLower() {}
/// When the SkipUnimplemented flag is set, addFakeDefUses() gets invoked by
/// the UnimplementedLoweringError macro to insert fake uses of all the
/// instruction variables and a fake def of the instruction dest, in order to
/// preserve integrity of liveness analysis.
void addFakeDefUses(const Inst *Instr);
/// Find (non-SSA) instructions where the Dest variable appears in some source
/// operand, and set the IsDestRedefined flag. This keeps liveness analysis
/// consistent.
......
......@@ -2787,11 +2787,7 @@ void TargetARM32::lowerArithmetic(const InstArithmetic *Instr) {
}
if (isVectorType(DestTy)) {
// Add a fake def to keep liveness consistent in the meantime.
Variable *T = makeReg(DestTy);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
......@@ -3496,10 +3492,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
return;
case InstCast::Sext: {
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
} else if (Dest->getType() == IceType_i64) {
// t1=sxtb src; t2= mov t1 asr #31; dst.lo=t1; dst.hi=t2
Constant *ShiftAmt = Ctx->getConstantInt32(31);
......@@ -3544,10 +3537,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
}
case InstCast::Zext: {
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
} else if (Dest->getType() == IceType_i64) {
// t1=uxtb src; dst.lo=t1; dst.hi=0
Operand *_0 =
......@@ -3600,10 +3590,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
}
case InstCast::Trunc: {
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
} else {
if (Src0->getType() == IceType_i64)
Src0 = loOperand(Src0);
......@@ -3623,10 +3610,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
// fpext: dest.f64 = fptrunc src0.fp32
const bool IsTrunc = CastKind == InstCast::Fptrunc;
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
assert(Dest->getType() == (IsTrunc ? IceType_f32 : IceType_f64));
......@@ -3640,10 +3624,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case InstCast::Fptosi:
case InstCast::Fptoui: {
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
......@@ -3679,10 +3660,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case InstCast::Sitofp:
case InstCast::Uitofp: {
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
const bool SourceIsSigned = CastKind == InstCast::Sitofp;
......@@ -3731,13 +3709,13 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case IceType_void:
llvm::report_fatal_error("Unexpected bitcast.");
case IceType_i1:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
case IceType_i8:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
case IceType_i16:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
case IceType_i32:
case IceType_f32: {
......@@ -3784,11 +3762,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case IceType_v16i8:
case IceType_v4f32:
case IceType_v4i32: {
// avoid liveness errors
Variable *T = makeReg(DestType);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
}
......@@ -3798,12 +3772,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
}
// Vector extractelement lowering is not yet implemented for ARM32.
// UnimplementedLoweringError either aborts or, under --skip-unimplemented,
// inserts fake defs/uses of Inst's variables to keep liveness consistent.
// The old hand-rolled FakeDef/_mov workaround and the superseded
// UnimplementedError call are removed: keeping both paths would report the
// error twice and leave dead locals (Dest, DestType, T).
void TargetARM32::lowerExtractElement(const InstExtractElement *Inst) {
  UnimplementedLoweringError(this, Inst);
}
namespace {
......@@ -3882,10 +3851,7 @@ TargetARM32::CondWhenTrue TargetARM32::lowerFcmpCond(const InstFcmp *Instr) {
void TargetARM32::lowerFcmp(const InstFcmp *Instr) {
Variable *Dest = Instr->getDest();
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
......@@ -4181,10 +4147,7 @@ void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
Variable *Dest = Inst->getDest();
if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType());
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
return;
}
......@@ -4204,8 +4167,7 @@ void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
}
// Vector insertelement lowering is not yet implemented for ARM32. The stale
// UnimplementedError call and the `(void)Inst;` silencer are removed: the
// macro consumes Inst, and keeping both error paths would report twice.
void TargetARM32::lowerInsertElement(const InstInsertElement *Inst) {
  UnimplementedLoweringError(this, Inst);
}
namespace {
......@@ -4749,10 +4711,7 @@ void TargetARM32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
Type DestTy = Dest->getType();
Variable *T = makeReg(DestTy);
if (isVectorType(DestTy)) {
// Add a fake def to keep liveness consistent in the meantime.
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
_vabs(T, legalizeToReg(Instr->getArg(0)));
......@@ -5343,10 +5302,7 @@ void TargetARM32::lowerSelect(const InstSelect *Inst) {
Operand *Condition = Inst->getCondition();
if (isVectorType(DestTy)) {
Variable *T = makeReg(DestTy);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
return;
}
......
......@@ -557,32 +557,49 @@ void TargetMIPS32::lowerAlloca(const InstAlloca *Inst) {
// after the alloca. The stack alignment restriction can be relaxed in some
// cases.
NeedsStackAlignment = true;
(void)Inst;
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
}
void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) {
Variable *Dest = Inst->getDest();
Operand *Src0 = legalizeUndef(Inst->getSrc(0));
Operand *Src1 = legalizeUndef(Inst->getSrc(1));
// We need to signal all the UnimplementedLoweringError errors before any
// legalization into new variables, otherwise Om1 register allocation may fail
// when it sees variables that are defined but not used.
if (Dest->getType() == IceType_i64) {
// TODO(reed kotler): fakedef needed for now until all cases are implemented
auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
Context.insert<InstFakeDef>(DestLo);
Context.insert<InstFakeDef>(DestHi);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
return;
}
if (isVectorType(Dest->getType())) {
Context.insert<InstFakeDef>(Dest);
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
return;
}
// Dest->getType() is non-i64 scalar
switch (Inst->getOp()) {
default:
break;
case InstArithmetic::Shl:
case InstArithmetic::Lshr:
case InstArithmetic::Ashr:
case InstArithmetic::Udiv:
case InstArithmetic::Sdiv:
case InstArithmetic::Urem:
case InstArithmetic::Srem:
case InstArithmetic::Fadd:
case InstArithmetic::Fsub:
case InstArithmetic::Fmul:
case InstArithmetic::Fdiv:
case InstArithmetic::Frem:
UnimplementedLoweringError(this, Inst);
return;
}
// At this point Dest->getType() is non-i64 scalar
Variable *T = makeReg(Dest->getType());
Operand *Src0 = legalizeUndef(Inst->getSrc(0));
Operand *Src1 = legalizeUndef(Inst->getSrc(1));
Variable *Src0R = legalizeToReg(Src0);
Variable *Src1R = legalizeToReg(Src1);
switch (Inst->getOp()) {
case InstArithmetic::_num:
break;
......@@ -636,12 +653,6 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) {
case InstArithmetic::Frem:
break;
}
// TODO(reed kotler):
// fakedef and fakeuse needed for now until all cases are implemented
Context.insert<InstFakeUse>(Src0R);
Context.insert<InstFakeUse>(Src1R);
Context.insert<InstFakeDef>(Dest);
UnimplementedError(Func->getContext()->getFlags());
}
void TargetMIPS32::lowerAssign(const InstAssign *Inst) {
......@@ -675,7 +686,7 @@ void TargetMIPS32::lowerAssign(const InstAssign *Inst) {
SrcR = legalize(Src0, Legal_Reg);
}
if (isVectorType(Dest->getType())) {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
} else {
_mov(Dest, SrcR);
}
......@@ -683,13 +694,11 @@ void TargetMIPS32::lowerAssign(const InstAssign *Inst) {
}
// Branch lowering is not yet implemented for MIPS32. Removed the superseded
// UnimplementedError call (which would abort even with --skip-unimplemented)
// and the now-dead `(void)Inst;` — the macro uses Inst.
void TargetMIPS32::lowerBr(const InstBr *Inst) {
  UnimplementedLoweringError(this, Inst);
}
// Call lowering is not yet implemented for MIPS32; delegate error reporting
// (or fake def/use insertion under --skip-unimplemented) to the macro.
// The old UnimplementedError call and `(void)Inst;` are stale and removed.
void TargetMIPS32::lowerCall(const InstCall *Inst) {
  UnimplementedLoweringError(this, Inst);
}
void TargetMIPS32::lowerCast(const InstCast *Inst) {
......@@ -699,112 +708,108 @@ void TargetMIPS32::lowerCast(const InstCast *Inst) {
Func->setError("Cast type not supported");
return;
case InstCast::Sext: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
case InstCast::Zext: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
case InstCast::Trunc: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
case InstCast::Fptrunc:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
case InstCast::Fpext: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
case InstCast::Fptosi:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
case InstCast::Fptoui:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
case InstCast::Sitofp:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
case InstCast::Uitofp: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
case InstCast::Bitcast: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
break;
}
}
}
// Vector extractelement lowering is not yet implemented for MIPS32.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerExtractElement(const InstExtractElement *Inst) {
  UnimplementedLoweringError(this, Inst);
}
// Floating-point compare lowering is not yet implemented for MIPS32.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerFcmp(const InstFcmp *Inst) {
  UnimplementedLoweringError(this, Inst);
}
// Integer compare lowering is not yet implemented for MIPS32.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerIcmp(const InstIcmp *Inst) {
  UnimplementedLoweringError(this, Inst);
}
// Vector insertelement lowering is not yet implemented for MIPS32.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerInsertElement(const InstInsertElement *Inst) {
  UnimplementedLoweringError(this, Inst);
}
void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
switch (Instr->getIntrinsicInfo().ID) {
case Intrinsics::AtomicCmpxchg: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::AtomicFence:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
case Intrinsics::AtomicFenceAll:
// NOTE: FenceAll should prevent and load/store from being moved across the
// fence (both atomic and non-atomic). The InstMIPS32Mfence instruction is
// currently marked coarsely as "HasSideEffects".
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
case Intrinsics::AtomicIsLockFree: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::AtomicLoad: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::AtomicRMW:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
case Intrinsics::AtomicStore: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Bswap: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Ctpop: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Ctlz: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Cttz: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Fabs: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Longjmp: {
......@@ -848,7 +853,7 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
}
case Intrinsics::NaClReadTP: {
if (Ctx->getFlags().getUseSandboxing()) {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
} else {
InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0);
lowerCall(Call);
......@@ -862,19 +867,19 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
return;
}
case Intrinsics::Sqrt: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Stacksave: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Stackrestore: {
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
}
case Intrinsics::Trap:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Instr);
return;
case Intrinsics::UnknownIntrinsic:
Func->setError("Should not be lowering UnknownIntrinsic");
......@@ -884,8 +889,7 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
}
// Load lowering is not yet implemented for MIPS32. With --skip-unimplemented
// the macro inserts a fake def of the dest so liveness analysis stays sound.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerLoad(const InstLoad *Inst) {
  UnimplementedLoweringError(this, Inst);
}
void TargetMIPS32::doAddressOptLoad() {
......@@ -929,20 +933,18 @@ void TargetMIPS32::lowerRet(const InstRet *Inst) {
}
default:
UnimplementedError(Func->getContext()->getFlags());
UnimplementedLoweringError(this, Inst);
}
}
_ret(getPhysicalRegister(RegMIPS32::Reg_RA), Reg);
}
// Select lowering is not yet implemented for MIPS32.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerSelect(const InstSelect *Inst) {
  UnimplementedLoweringError(this, Inst);
}
// Store lowering is not yet implemented for MIPS32.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerStore(const InstStore *Inst) {
  UnimplementedLoweringError(this, Inst);
}
void TargetMIPS32::doAddressOptStore() {
......@@ -950,12 +952,11 @@ void TargetMIPS32::doAddressOptStore() {
}
// Switch lowering is not yet implemented for MIPS32.
// Dropped the duplicate UnimplementedError path and the dead `(void)Inst;`.
void TargetMIPS32::lowerSwitch(const InstSwitch *Inst) {
  UnimplementedLoweringError(this, Inst);
}
void TargetMIPS32::lowerUnreachable(const InstUnreachable * /*Inst*/) {
UnimplementedError(Func->getContext()->getFlags());
void TargetMIPS32::lowerUnreachable(const InstUnreachable *Inst) {
UnimplementedLoweringError(this, Inst);
}
// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.