Commit 91c773e1 by Jim Stichnoth

Subzero: Improve the usability of UnimplementedError during lowering.

Provides a variant of the UnimplementedError macro specifically for use in incomplete target instruction lowering. When --skip-unimplemented is specified, the UnimplementedLoweringError macro adds FakeUse and FakeDef instructions in order to maintain consistency in liveness analysis. BUG= none R=kschimpf@google.com Review URL: https://codereview.chromium.org/1591893002 .
parent b819665a
...@@ -459,6 +459,26 @@ void TargetLowering::markRedefinitions() { ...@@ -459,6 +459,26 @@ void TargetLowering::markRedefinitions() {
} }
} }
// Inserts a FakeUse for every variable read by Instr, then a FakeDef for
// Instr's dest (when it has one). A Variable64On32 is modeled through its
// lo/hi halves, so each half gets its own fake use/def. This keeps liveness
// analysis consistent when the real lowering of Instr is skipped.
void TargetLowering::addFakeDefUses(const Inst *Instr) {
  FOREACH_VAR_IN_INST(Var, *Instr) {
    auto *SplitVar = llvm::dyn_cast<Variable64On32>(Var);
    if (SplitVar == nullptr) {
      Context.insert<InstFakeUse>(Var);
    } else {
      // 64-bit variable on a 32-bit target: fake-use both halves.
      Context.insert<InstFakeUse>(SplitVar->getLo());
      Context.insert<InstFakeUse>(SplitVar->getHi());
    }
  }
  Variable *Dest = Instr->getDest();
  if (Dest == nullptr)
    return;
  auto *SplitDest = llvm::dyn_cast<Variable64On32>(Dest);
  if (SplitDest == nullptr) {
    Context.insert<InstFakeDef>(Dest);
  } else {
    // Likewise, fake-def both halves of a split 64-bit dest.
    Context.insert<InstFakeDef>(SplitDest->getLo());
    Context.insert<InstFakeDef>(SplitDest->getHi());
  }
}
void TargetLowering::sortVarsByAlignment(VarList &Dest, void TargetLowering::sortVarsByAlignment(VarList &Dest,
const VarList &Source) const { const VarList &Source) const {
Dest = Source; Dest = Source;
......
...@@ -45,6 +45,21 @@ namespace Ice { ...@@ -45,6 +45,21 @@ namespace Ice {
} \ } \
} while (0) } while (0)
// UnimplementedLoweringError is similar in style to UnimplementedError. Given
// a TargetLowering object pointer and an Inst pointer, it adds appropriate
// FakeDef and FakeUse instructions to try maintain liveness consistency.
#define UnimplementedLoweringError(Target, Instr) \
do { \
if ((Target)->Ctx->getFlags().getSkipUnimplemented()) { \
(Target)->addFakeDefUses(Instr); \
} else { \
/* Use llvm_unreachable instead of report_fatal_error, which gives \
better stack traces. */ \
llvm_unreachable("Not yet implemented"); \
abort(); \
} \
} while (0)
/// LoweringContext makes it easy to iterate through non-deleted instructions in /// LoweringContext makes it easy to iterate through non-deleted instructions in
/// a node, and insert new (lowered) instructions at the current point. Along /// a node, and insert new (lowered) instructions at the current point. Along
/// with the instruction list container and associated iterators, it holds the /// with the instruction list container and associated iterators, it holds the
...@@ -373,6 +388,12 @@ protected: ...@@ -373,6 +388,12 @@ protected:
/// before returning. /// before returning.
virtual void postLower() {} virtual void postLower() {}
/// When the SkipUnimplemented flag is set, addFakeDefUses() gets invoked by
/// the UnimplementedLoweringError macro to insert fake uses of all the
/// instruction's variables and a fake def of the instruction's dest, in order
/// to preserve the integrity of liveness analysis. Variable64On32 operands
/// are handled via their lo/hi halves.
void addFakeDefUses(const Inst *Instr);
/// Find (non-SSA) instructions where the Dest variable appears in some source /// Find (non-SSA) instructions where the Dest variable appears in some source
/// operand, and set the IsDestRedefined flag. This keeps liveness analysis /// operand, and set the IsDestRedefined flag. This keeps liveness analysis
/// consistent. /// consistent.
......
...@@ -2787,11 +2787,7 @@ void TargetARM32::lowerArithmetic(const InstArithmetic *Instr) { ...@@ -2787,11 +2787,7 @@ void TargetARM32::lowerArithmetic(const InstArithmetic *Instr) {
} }
if (isVectorType(DestTy)) { if (isVectorType(DestTy)) {
// Add a fake def to keep liveness consistent in the meantime. UnimplementedLoweringError(this, Instr);
Variable *T = makeReg(DestTy);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
...@@ -3496,10 +3492,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3496,10 +3492,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
return; return;
case InstCast::Sext: { case InstCast::Sext: {
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
} else if (Dest->getType() == IceType_i64) { } else if (Dest->getType() == IceType_i64) {
// t1=sxtb src; t2= mov t1 asr #31; dst.lo=t1; dst.hi=t2 // t1=sxtb src; t2= mov t1 asr #31; dst.lo=t1; dst.hi=t2
Constant *ShiftAmt = Ctx->getConstantInt32(31); Constant *ShiftAmt = Ctx->getConstantInt32(31);
...@@ -3544,10 +3537,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3544,10 +3537,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
} }
case InstCast::Zext: { case InstCast::Zext: {
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
} else if (Dest->getType() == IceType_i64) { } else if (Dest->getType() == IceType_i64) {
// t1=uxtb src; dst.lo=t1; dst.hi=0 // t1=uxtb src; dst.lo=t1; dst.hi=0
Operand *_0 = Operand *_0 =
...@@ -3600,10 +3590,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3600,10 +3590,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
} }
case InstCast::Trunc: { case InstCast::Trunc: {
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
} else { } else {
if (Src0->getType() == IceType_i64) if (Src0->getType() == IceType_i64)
Src0 = loOperand(Src0); Src0 = loOperand(Src0);
...@@ -3623,10 +3610,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3623,10 +3610,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
// fpext: dest.f64 = fptrunc src0.fp32 // fpext: dest.f64 = fptrunc src0.fp32
const bool IsTrunc = CastKind == InstCast::Fptrunc; const bool IsTrunc = CastKind == InstCast::Fptrunc;
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
break; break;
} }
assert(Dest->getType() == (IsTrunc ? IceType_f32 : IceType_f64)); assert(Dest->getType() == (IsTrunc ? IceType_f32 : IceType_f64));
...@@ -3640,10 +3624,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3640,10 +3624,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case InstCast::Fptosi: case InstCast::Fptosi:
case InstCast::Fptoui: { case InstCast::Fptoui: {
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
break; break;
} }
...@@ -3679,10 +3660,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3679,10 +3660,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case InstCast::Sitofp: case InstCast::Sitofp:
case InstCast::Uitofp: { case InstCast::Uitofp: {
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
break; break;
} }
const bool SourceIsSigned = CastKind == InstCast::Sitofp; const bool SourceIsSigned = CastKind == InstCast::Sitofp;
...@@ -3731,13 +3709,13 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3731,13 +3709,13 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case IceType_void: case IceType_void:
llvm::report_fatal_error("Unexpected bitcast."); llvm::report_fatal_error("Unexpected bitcast.");
case IceType_i1: case IceType_i1:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
case IceType_i8: case IceType_i8:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
case IceType_i16: case IceType_i16:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
case IceType_i32: case IceType_i32:
case IceType_f32: { case IceType_f32: {
...@@ -3784,11 +3762,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3784,11 +3762,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
case IceType_v16i8: case IceType_v16i8:
case IceType_v4f32: case IceType_v4f32:
case IceType_v4i32: { case IceType_v4i32: {
// avoid liveness errors UnimplementedLoweringError(this, Inst);
Variable *T = makeReg(DestType);
Context.insert<InstFakeDef>(T, legalizeToReg(Src0));
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
break; break;
} }
} }
...@@ -3798,12 +3772,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) { ...@@ -3798,12 +3772,7 @@ void TargetARM32::lowerCast(const InstCast *Inst) {
} }
void TargetARM32::lowerExtractElement(const InstExtractElement *Inst) { void TargetARM32::lowerExtractElement(const InstExtractElement *Inst) {
Variable *Dest = Inst->getDest(); UnimplementedLoweringError(this, Inst);
Type DestType = Dest->getType();
Variable *T = makeReg(DestType);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
} }
namespace { namespace {
...@@ -3882,10 +3851,7 @@ TargetARM32::CondWhenTrue TargetARM32::lowerFcmpCond(const InstFcmp *Instr) { ...@@ -3882,10 +3851,7 @@ TargetARM32::CondWhenTrue TargetARM32::lowerFcmpCond(const InstFcmp *Instr) {
void TargetARM32::lowerFcmp(const InstFcmp *Instr) { void TargetARM32::lowerFcmp(const InstFcmp *Instr) {
Variable *Dest = Instr->getDest(); Variable *Dest = Instr->getDest();
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Instr);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
...@@ -4181,10 +4147,7 @@ void TargetARM32::lowerIcmp(const InstIcmp *Inst) { ...@@ -4181,10 +4147,7 @@ void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
Variable *Dest = Inst->getDest(); Variable *Dest = Inst->getDest();
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Variable *T = makeReg(Dest->getType()); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
...@@ -4204,8 +4167,7 @@ void TargetARM32::lowerIcmp(const InstIcmp *Inst) { ...@@ -4204,8 +4167,7 @@ void TargetARM32::lowerIcmp(const InstIcmp *Inst) {
} }
void TargetARM32::lowerInsertElement(const InstInsertElement *Inst) { void TargetARM32::lowerInsertElement(const InstInsertElement *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
namespace { namespace {
...@@ -4749,10 +4711,7 @@ void TargetARM32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -4749,10 +4711,7 @@ void TargetARM32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
Type DestTy = Dest->getType(); Type DestTy = Dest->getType();
Variable *T = makeReg(DestTy); Variable *T = makeReg(DestTy);
if (isVectorType(DestTy)) { if (isVectorType(DestTy)) {
// Add a fake def to keep liveness consistent in the meantime. UnimplementedLoweringError(this, Instr);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
_vabs(T, legalizeToReg(Instr->getArg(0))); _vabs(T, legalizeToReg(Instr->getArg(0)));
...@@ -5343,10 +5302,7 @@ void TargetARM32::lowerSelect(const InstSelect *Inst) { ...@@ -5343,10 +5302,7 @@ void TargetARM32::lowerSelect(const InstSelect *Inst) {
Operand *Condition = Inst->getCondition(); Operand *Condition = Inst->getCondition();
if (isVectorType(DestTy)) { if (isVectorType(DestTy)) {
Variable *T = makeReg(DestTy); UnimplementedLoweringError(this, Inst);
Context.insert<InstFakeDef>(T);
_mov(Dest, T);
UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
......
...@@ -557,32 +557,49 @@ void TargetMIPS32::lowerAlloca(const InstAlloca *Inst) { ...@@ -557,32 +557,49 @@ void TargetMIPS32::lowerAlloca(const InstAlloca *Inst) {
// after the alloca. The stack alignment restriction can be relaxed in some // after the alloca. The stack alignment restriction can be relaxed in some
// cases. // cases.
NeedsStackAlignment = true; NeedsStackAlignment = true;
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) { void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) {
Variable *Dest = Inst->getDest(); Variable *Dest = Inst->getDest();
Operand *Src0 = legalizeUndef(Inst->getSrc(0)); // We need to signal all the UnimplementedLoweringError errors before any
Operand *Src1 = legalizeUndef(Inst->getSrc(1)); // legalization into new variables, otherwise Om1 register allocation may fail
// when it sees variables that are defined but not used.
if (Dest->getType() == IceType_i64) { if (Dest->getType() == IceType_i64) {
// TODO(reed kotler): fakedef needed for now until all cases are implemented UnimplementedLoweringError(this, Inst);
auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
Context.insert<InstFakeDef>(DestLo);
Context.insert<InstFakeDef>(DestHi);
UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
Context.insert<InstFakeDef>(Dest); UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
return; return;
} }
// Dest->getType() is non-i64 scalar switch (Inst->getOp()) {
default:
break;
case InstArithmetic::Shl:
case InstArithmetic::Lshr:
case InstArithmetic::Ashr:
case InstArithmetic::Udiv:
case InstArithmetic::Sdiv:
case InstArithmetic::Urem:
case InstArithmetic::Srem:
case InstArithmetic::Fadd:
case InstArithmetic::Fsub:
case InstArithmetic::Fmul:
case InstArithmetic::Fdiv:
case InstArithmetic::Frem:
UnimplementedLoweringError(this, Inst);
return;
}
// At this point Dest->getType() is non-i64 scalar
Variable *T = makeReg(Dest->getType()); Variable *T = makeReg(Dest->getType());
Operand *Src0 = legalizeUndef(Inst->getSrc(0));
Operand *Src1 = legalizeUndef(Inst->getSrc(1));
Variable *Src0R = legalizeToReg(Src0); Variable *Src0R = legalizeToReg(Src0);
Variable *Src1R = legalizeToReg(Src1); Variable *Src1R = legalizeToReg(Src1);
switch (Inst->getOp()) { switch (Inst->getOp()) {
case InstArithmetic::_num: case InstArithmetic::_num:
break; break;
...@@ -636,12 +653,6 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) { ...@@ -636,12 +653,6 @@ void TargetMIPS32::lowerArithmetic(const InstArithmetic *Inst) {
case InstArithmetic::Frem: case InstArithmetic::Frem:
break; break;
} }
// TODO(reed kotler):
// fakedef and fakeuse needed for now until all cases are implemented
Context.insert<InstFakeUse>(Src0R);
Context.insert<InstFakeUse>(Src1R);
Context.insert<InstFakeDef>(Dest);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerAssign(const InstAssign *Inst) { void TargetMIPS32::lowerAssign(const InstAssign *Inst) {
...@@ -675,7 +686,7 @@ void TargetMIPS32::lowerAssign(const InstAssign *Inst) { ...@@ -675,7 +686,7 @@ void TargetMIPS32::lowerAssign(const InstAssign *Inst) {
SrcR = legalize(Src0, Legal_Reg); SrcR = legalize(Src0, Legal_Reg);
} }
if (isVectorType(Dest->getType())) { if (isVectorType(Dest->getType())) {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
} else { } else {
_mov(Dest, SrcR); _mov(Dest, SrcR);
} }
...@@ -683,13 +694,11 @@ void TargetMIPS32::lowerAssign(const InstAssign *Inst) { ...@@ -683,13 +694,11 @@ void TargetMIPS32::lowerAssign(const InstAssign *Inst) {
} }
void TargetMIPS32::lowerBr(const InstBr *Inst) { void TargetMIPS32::lowerBr(const InstBr *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerCall(const InstCall *Inst) { void TargetMIPS32::lowerCall(const InstCall *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerCast(const InstCast *Inst) { void TargetMIPS32::lowerCast(const InstCast *Inst) {
...@@ -699,112 +708,108 @@ void TargetMIPS32::lowerCast(const InstCast *Inst) { ...@@ -699,112 +708,108 @@ void TargetMIPS32::lowerCast(const InstCast *Inst) {
Func->setError("Cast type not supported"); Func->setError("Cast type not supported");
return; return;
case InstCast::Sext: { case InstCast::Sext: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
} }
case InstCast::Zext: { case InstCast::Zext: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
} }
case InstCast::Trunc: { case InstCast::Trunc: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
} }
case InstCast::Fptrunc: case InstCast::Fptrunc:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
case InstCast::Fpext: { case InstCast::Fpext: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
} }
case InstCast::Fptosi: case InstCast::Fptosi:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
case InstCast::Fptoui: case InstCast::Fptoui:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
case InstCast::Sitofp: case InstCast::Sitofp:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
case InstCast::Uitofp: { case InstCast::Uitofp: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
} }
case InstCast::Bitcast: { case InstCast::Bitcast: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
break; break;
} }
} }
} }
void TargetMIPS32::lowerExtractElement(const InstExtractElement *Inst) { void TargetMIPS32::lowerExtractElement(const InstExtractElement *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerFcmp(const InstFcmp *Inst) { void TargetMIPS32::lowerFcmp(const InstFcmp *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerIcmp(const InstIcmp *Inst) { void TargetMIPS32::lowerIcmp(const InstIcmp *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerInsertElement(const InstInsertElement *Inst) { void TargetMIPS32::lowerInsertElement(const InstInsertElement *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
switch (Instr->getIntrinsicInfo().ID) { switch (Instr->getIntrinsicInfo().ID) {
case Intrinsics::AtomicCmpxchg: { case Intrinsics::AtomicCmpxchg: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::AtomicFence: case Intrinsics::AtomicFence:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
case Intrinsics::AtomicFenceAll: case Intrinsics::AtomicFenceAll:
// NOTE: FenceAll should prevent and load/store from being moved across the // NOTE: FenceAll should prevent and load/store from being moved across the
// fence (both atomic and non-atomic). The InstMIPS32Mfence instruction is // fence (both atomic and non-atomic). The InstMIPS32Mfence instruction is
// currently marked coarsely as "HasSideEffects". // currently marked coarsely as "HasSideEffects".
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
case Intrinsics::AtomicIsLockFree: { case Intrinsics::AtomicIsLockFree: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::AtomicLoad: { case Intrinsics::AtomicLoad: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::AtomicRMW: case Intrinsics::AtomicRMW:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
case Intrinsics::AtomicStore: { case Intrinsics::AtomicStore: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Bswap: { case Intrinsics::Bswap: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Ctpop: { case Intrinsics::Ctpop: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Ctlz: { case Intrinsics::Ctlz: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Cttz: { case Intrinsics::Cttz: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Fabs: { case Intrinsics::Fabs: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Longjmp: { case Intrinsics::Longjmp: {
...@@ -848,7 +853,7 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -848,7 +853,7 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
} }
case Intrinsics::NaClReadTP: { case Intrinsics::NaClReadTP: {
if (Ctx->getFlags().getUseSandboxing()) { if (Ctx->getFlags().getUseSandboxing()) {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
} else { } else {
InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0); InstCall *Call = makeHelperCall(H_call_read_tp, Instr->getDest(), 0);
lowerCall(Call); lowerCall(Call);
...@@ -862,19 +867,19 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -862,19 +867,19 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
return; return;
} }
case Intrinsics::Sqrt: { case Intrinsics::Sqrt: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Stacksave: { case Intrinsics::Stacksave: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Stackrestore: { case Intrinsics::Stackrestore: {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
} }
case Intrinsics::Trap: case Intrinsics::Trap:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Instr);
return; return;
case Intrinsics::UnknownIntrinsic: case Intrinsics::UnknownIntrinsic:
Func->setError("Should not be lowering UnknownIntrinsic"); Func->setError("Should not be lowering UnknownIntrinsic");
...@@ -884,8 +889,7 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) { ...@@ -884,8 +889,7 @@ void TargetMIPS32::lowerIntrinsicCall(const InstIntrinsicCall *Instr) {
} }
void TargetMIPS32::lowerLoad(const InstLoad *Inst) { void TargetMIPS32::lowerLoad(const InstLoad *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::doAddressOptLoad() { void TargetMIPS32::doAddressOptLoad() {
...@@ -929,20 +933,18 @@ void TargetMIPS32::lowerRet(const InstRet *Inst) { ...@@ -929,20 +933,18 @@ void TargetMIPS32::lowerRet(const InstRet *Inst) {
} }
default: default:
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
} }
} }
_ret(getPhysicalRegister(RegMIPS32::Reg_RA), Reg); _ret(getPhysicalRegister(RegMIPS32::Reg_RA), Reg);
} }
void TargetMIPS32::lowerSelect(const InstSelect *Inst) { void TargetMIPS32::lowerSelect(const InstSelect *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerStore(const InstStore *Inst) { void TargetMIPS32::lowerStore(const InstStore *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::doAddressOptStore() { void TargetMIPS32::doAddressOptStore() {
...@@ -950,12 +952,11 @@ void TargetMIPS32::doAddressOptStore() { ...@@ -950,12 +952,11 @@ void TargetMIPS32::doAddressOptStore() {
} }
void TargetMIPS32::lowerSwitch(const InstSwitch *Inst) { void TargetMIPS32::lowerSwitch(const InstSwitch *Inst) {
(void)Inst; UnimplementedLoweringError(this, Inst);
UnimplementedError(Func->getContext()->getFlags());
} }
void TargetMIPS32::lowerUnreachable(const InstUnreachable * /*Inst*/) { void TargetMIPS32::lowerUnreachable(const InstUnreachable *Inst) {
UnimplementedError(Func->getContext()->getFlags()); UnimplementedLoweringError(this, Inst);
} }
// Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve // Turn an i64 Phi instruction into a pair of i32 Phi instructions, to preserve
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment