Commit 2d6c8267 by Jim Stichnoth

Subzero: Clean up some uses of *_cast<>.

Some casts to size_t for use as array indexes are simply unnecessary. Some explicit declaration types are changed to "auto" to avoid redundancy with the static_cast type. A few llvm::dyn_cast<> operations are changed to llvm::cast<>, and vice versa. A few explicit declaration types are changed to "auto" when used with llvm::cast<> and llvm::dynamic_cast<>. Some of these were missed during an earlier cleansing because of multi-line issues. There are still a few opportunities related to Variable register numbers, but they are ignored for now because they are being addressed in another CL. BUG= none R=jpp@chromium.org Review URL: https://codereview.chromium.org/1674033002 .
parent 28df6bad
......@@ -706,7 +706,7 @@ Label *AssemblerARM32::getOrCreateLabel(SizeT Number, LabelVector &Labels) {
// Pull out offset from branch Inst.
/// Pulls the signed branch displacement back out of an encoded branch
/// instruction word. Inverse of the corresponding encode step.
/// @param Inst  raw ARM branch instruction encoding.
/// @return byte offset relative to the branch, adjusted for PC-read semantics.
IOffsetT AssemblerARM32::decodeBranchOffset(IValueT Inst) {
  // Sign-extend, left-shift by 2, and adjust to the way ARM CPUs read PC.
  // Shifting the 24-bit field up by 8 and arithmetically back down by 6
  // sign-extends it and multiplies by 4 in one go.
  const IOffsetT Offset = (Inst & kBranchOffsetMask) << 8;
  return (Offset >> 6) + kPCReadOffset;
}
......
......@@ -916,8 +916,7 @@ bool Cfg::validateLiveness() const {
// of the block, because a Phi temporary may be live at the end of
// the previous block, and if it is also assigned in the first
// instruction of this block, the adjacent live ranges get merged.
if (static_cast<class Inst *>(&Instr) != FirstInst &&
!Instr.isDestRedefined() &&
if (&Instr != FirstInst && !Instr.isDestRedefined() &&
Dest->getLiveRange().containsValue(InstNumber - 1, IsDest))
Invalid = true;
if (Invalid) {
......
......@@ -280,10 +280,8 @@ IceString InstArithmetic::getInstName() const {
}
/// Returns the printable mnemonic for an arithmetic op kind, or "???" for an
/// out-of-range value. Op is used directly as the table index.
const char *InstArithmetic::getOpName(OpKind Op) {
  return Op < InstArithmetic::_num ? InstArithmeticAttributes[Op].DisplayString
                                   : "???";
}
bool InstArithmetic::isCommutative() const {
......@@ -729,9 +727,8 @@ void InstCall::dump(const Cfg *Func) const {
}
/// Returns the printable name for a cast op kind. Kind is used directly as the
/// table index; an out-of-range value is a program error.
const char *InstCast::getCastName(InstCast::OpKind Kind) {
  if (Kind < InstCast::OpKind::_num)
    return InstCastAttributes[Kind].DisplayString;
  llvm_unreachable("Invalid InstCast::OpKind");
  return "???"; // unreachable in practice; keeps release builds well-defined
}
......
......@@ -348,7 +348,7 @@ public:
// Factory: placement-constructs a StackVariable in Cfg-owned storage, so the
// Cfg allocator (not the caller) owns the memory.
static StackVariable *create(Cfg *Func, Type Ty, SizeT Index) {
  auto *Storage = Func->allocate<StackVariable>();
  return new (Storage) StackVariable(Ty, Index);
}
const static OperandKind StackVariableKind =
constexpr static auto StackVariableKind =
static_cast<OperandKind>(kVariable_Target);
static bool classof(const Operand *Operand) {
return Operand->getKind() == StackVariableKind;
......
......@@ -136,9 +136,8 @@ const struct TableIcmp64_ {
};
/// Maps a high-level integer-compare condition to the ARM32 condition code
/// used for 32-bit compares. Cond indexes TableIcmp32 directly.
CondARM32::Cond getIcmp32Mapping(InstIcmp::ICond Cond) {
  assert(Cond < llvm::array_lengthof(TableIcmp32));
  return TableIcmp32[Cond].Mapping;
}
// In some cases, there are x-macros tables for both high-level and low-level
......@@ -3952,8 +3951,7 @@ void TargetARM32::lowerFcmp(const InstFcmp *Instr) {
TargetARM32::CondWhenTrue
TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
Operand *Src1) {
size_t Index = static_cast<size_t>(Condition);
assert(Index < llvm::array_lengthof(TableIcmp64));
assert(Condition < llvm::array_lengthof(TableIcmp64));
Int32Operands SrcsLo(loOperand(Src0), loOperand(Src1));
Int32Operands SrcsHi(hiOperand(Src0), hiOperand(Src1));
......@@ -3971,7 +3969,7 @@ TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
Variable *Src0HiR = SrcsHi.src0R(this);
_orrs(T, Src0LoR, Src0HiR);
Context.insert<InstFakeUse>(T);
return CondWhenTrue(TableIcmp64[Index].C1);
return CondWhenTrue(TableIcmp64[Condition].C1);
}
Variable *Src0RLo = SrcsLo.src0R(this);
......@@ -3979,10 +3977,11 @@ TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
Operand *Src1RFLo = SrcsLo.src1RF(this);
Operand *Src1RFHi = ValueLo == ValueHi ? Src1RFLo : SrcsHi.src1RF(this);
const bool UseRsb = TableIcmp64[Index].Swapped != SrcsLo.swappedOperands();
const bool UseRsb =
TableIcmp64[Condition].Swapped != SrcsLo.swappedOperands();
if (UseRsb) {
if (TableIcmp64[Index].IsSigned) {
if (TableIcmp64[Condition].IsSigned) {
Variable *T = makeReg(IceType_i32);
_rsbs(T, Src0RLo, Src1RFLo);
Context.insert<InstFakeUse>(T);
......@@ -4003,7 +4002,7 @@ TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
Context.insert<InstFakeUse>(T);
}
} else {
if (TableIcmp64[Index].IsSigned) {
if (TableIcmp64[Condition].IsSigned) {
_cmp(Src0RLo, Src1RFLo);
Variable *T = makeReg(IceType_i32);
_sbcs(T, Src0RHi, Src1RFHi);
......@@ -4014,12 +4013,12 @@ TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
}
}
return CondWhenTrue(TableIcmp64[Index].C1);
return CondWhenTrue(TableIcmp64[Condition].C1);
}
Variable *Src0RLo, *Src0RHi;
Operand *Src1RFLo, *Src1RFHi;
if (TableIcmp64[Index].Swapped) {
if (TableIcmp64[Condition].Swapped) {
Src0RLo = legalizeToReg(loOperand(Src1));
Src0RHi = legalizeToReg(hiOperand(Src1));
Src1RFLo = legalizeToReg(loOperand(Src0));
......@@ -4060,7 +4059,7 @@ TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
//
// So, we are going with the GCC version since it's usually better (except
// perhaps for eq/ne). We could revisit special-casing eq/ne later.
if (TableIcmp64[Index].IsSigned) {
if (TableIcmp64[Condition].IsSigned) {
Variable *ScratchReg = makeReg(IceType_i32);
_cmp(Src0RLo, Src1RFLo);
_sbcs(ScratchReg, Src0RHi, Src1RFHi);
......@@ -4071,7 +4070,7 @@ TargetARM32::lowerInt64IcmpCond(InstIcmp::ICond Condition, Operand *Src0,
_cmp(Src0RHi, Src1RFHi);
_cmp(Src0RLo, Src1RFLo, CondARM32::EQ);
}
return CondWhenTrue(TableIcmp64[Index].C1);
return CondWhenTrue(TableIcmp64[Condition].C1);
}
TargetARM32::CondWhenTrue
......
......@@ -1245,7 +1245,7 @@ Operand *TargetMIPS32::legalize(Operand *From, LegalMask Allowed,
Context.insert<InstFakeDef>(Reg);
return Reg;
} else if (auto *C32 = llvm::dyn_cast<ConstantInteger32>(From)) {
uint32_t Value = static_cast<uint32_t>(C32->getValue());
const uint32_t Value = C32->getValue();
// Check if the immediate will fit in a Flexible second operand,
// if a Flexible second operand is allowed. We need to know the exact
// value, so that rules out relocatable constants.
......
......@@ -733,9 +733,7 @@ public:
/// representation of the vector.
/// Returns the in-memory element type for the vector type Ty (x86-32).
/// Ty must be a vector type and a valid index into the attributes table.
static Type getInVectorElementType(Type Ty) {
  assert(isVectorType(Ty));
  assert(Ty < TableTypeX8632AttributesSize);
  return TableTypeX8632Attributes[Ty].InVectorElementType;
}
......@@ -790,9 +788,8 @@ public:
/// @}
/// Maps a high-level integer-compare condition to the x86-32 branch condition.
/// Cond indexes TableIcmp32 directly.
static Cond::BrCond getIcmp32Mapping(InstIcmp::ICond Cond) {
  assert(Cond < TableIcmp32Size);
  return TableIcmp32[Cond].Mapping;
}
static const struct TableTypeX8632AttributesType {
......@@ -959,7 +956,7 @@ public:
// Factory: placement-constructs a SpillVariable in Cfg-owned storage; the Cfg
// allocator owns the resulting object's memory.
static SpillVariable *create(Cfg *Func, Type Ty, SizeT Index) {
  auto *Storage = Func->allocate<SpillVariable>();
  return new (Storage) SpillVariable(Ty, Index);
}
const static OperandKind SpillVariableKind =
constexpr static auto SpillVariableKind =
static_cast<OperandKind>(kVariable_Target);
static bool classof(const Operand *Operand) {
return Operand->getKind() == SpillVariableKind;
......
......@@ -412,7 +412,7 @@ Traits::X86OperandMem *TargetX8664::_sandbox_mem_reference(X86OperandMem *Mem) {
if (Offset != nullptr) {
if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Offset)) {
NeedsLea = CR->getName() != "" || CR->getOffset() < 0;
} else if (const auto *Imm = llvm::cast<ConstantInteger32>(Offset)) {
} else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Offset)) {
NeedsLea = Imm->getValue() < 0;
} else {
llvm::report_fatal_error("Unexpected Offset type.");
......
......@@ -784,9 +784,7 @@ public:
/// representation of the vector.
/// Returns the in-memory element type for the vector type Ty (x86-64).
/// Ty must be a vector type and a valid index into the attributes table.
static Type getInVectorElementType(Type Ty) {
  assert(isVectorType(Ty));
  assert(Ty < TableTypeX8664AttributesSize);
  return TableTypeX8664Attributes[Ty].InVectorElementType;
}
......@@ -841,9 +839,8 @@ public:
/// @}
/// Maps a high-level integer-compare condition to the x86-64 branch condition.
/// Cond indexes TableIcmp32 directly.
static Cond::BrCond getIcmp32Mapping(InstIcmp::ICond Cond) {
  assert(Cond < TableIcmp32Size);
  return TableIcmp32[Cond].Mapping;
}
static const struct TableTypeX8664AttributesType {
......@@ -1004,7 +1001,7 @@ public:
// Factory: allocates from the Cfg and placement-constructs the SpillVariable
// there, so ownership stays with the Cfg allocator.
static SpillVariable *create(Cfg *Func, Type Ty, SizeT Index) {
  auto *Storage = Func->allocate<SpillVariable>();
  return new (Storage) SpillVariable(Ty, Index);
}
const static OperandKind SpillVariableKind =
constexpr static auto SpillVariableKind =
static_cast<OperandKind>(kVariable_Target);
static bool classof(const Operand *Operand) {
return Operand->getKind() == SpillVariableKind;
......
......@@ -133,9 +133,8 @@ const TypePropertyFields TypePropertiesTable[] = {
} // end anonymous namespace
/// Returns the printable name of a target architecture; an out-of-range value
/// is a program error. Arch indexes TargetArchName directly.
const char *targetArchString(const TargetArch Arch) {
  if (Arch < TargetArch_NUM)
    return TargetArchName[Arch];
  llvm_unreachable("Invalid target arch for targetArchString");
  return "???"; // unreachable fallback for release builds
}
......@@ -146,121 +145,106 @@ size_t typeWidthInBytes(Type Ty) {
}
/// Returns log2 of the byte width of Ty. Ty indexes TypeAttributes directly;
/// an invalid type is a program error.
int8_t typeWidthInBytesLog2(Type Ty) {
  if (Ty < IceType_NUM)
    return TypeAttributes[Ty].TypeWidthInBytesLog2;
  llvm_unreachable("Invalid type for typeWidthInBytesLog2()");
  return 0;
}
/// Returns the required alignment, in bytes, of Ty.
size_t typeAlignInBytes(Type Ty) {
  if (Ty < IceType_NUM)
    return TypeAttributes[Ty].TypeAlignInBytes;
  llvm_unreachable("Invalid type for typeAlignInBytes()");
  return 1;
}
/// Returns the number of elements in Ty (1 for scalar types, per the table).
size_t typeNumElements(Type Ty) {
  if (Ty < IceType_NUM)
    return TypeAttributes[Ty].TypeNumElements;
  llvm_unreachable("Invalid type for typeNumElements()");
  return 1;
}
/// Returns the element type of Ty, as recorded in the attributes table.
Type typeElementType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypeAttributes[Ty].TypeElementType;
  llvm_unreachable("Invalid type for typeElementType()");
  return IceType_void;
}
/// Table-driven predicate: true iff Ty is a vector type.
bool isVectorType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsVectorType;
  llvm_unreachable("Invalid type for isVectorType()");
  return false;
}
/// Table-driven predicate: true iff Ty is an integer (scalar or vector) type.
bool isIntegerType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsIntegerType;
  llvm_unreachable("Invalid type for isIntegerType()");
  return false;
}
/// Table-driven predicate: true iff Ty is a scalar integer type.
bool isScalarIntegerType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsScalarIntegerType;
  llvm_unreachable("Invalid type for isScalIntegerType()");
  return false;
}
/// Table-driven predicate: true iff Ty is a vector-of-integers type.
bool isVectorIntegerType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsVectorIntegerType;
  llvm_unreachable("Invalid type for isVectorIntegerType()");
  return false;
}
/// Table-driven predicate: true iff integer arithmetic is defined on Ty.
bool isIntegerArithmeticType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsIntegerArithmeticType;
  llvm_unreachable("Invalid type for isIntegerArithmeticType()");
  return false;
}
/// Table-driven predicate: true iff Ty is a floating-point (scalar or vector)
/// type.
bool isFloatingType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsFloatingType;
  llvm_unreachable("Invalid type for isFloatingType()");
  return false;
}
/// Table-driven predicate: true iff Ty is a scalar floating-point type.
bool isScalarFloatingType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsScalarFloatingType;
  llvm_unreachable("Invalid type for isScalarFloatingType()");
  return false;
}
/// Table-driven predicate: true iff Ty is a vector-of-floats type.
bool isVectorFloatingType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsVectorFloatingType;
  llvm_unreachable("Invalid type for isVectorFloatingType()");
  return false;
}
/// Table-driven predicate: true iff Ty may appear as a load/store access type.
bool isLoadStoreType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsLoadStoreType;
  llvm_unreachable("Invalid type for isLoadStoreType()");
  return false;
}
/// Table-driven predicate: true iff Ty is a legal call-parameter type.
bool isCallParameterType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].TypeIsCallParameterType;
  llvm_unreachable("Invalid type for isCallParameterType()");
  return false;
}
/// Returns the type produced by comparing two values of type Ty, per the
/// properties table.
Type getCompareResultType(Type Ty) {
  if (Ty < IceType_NUM)
    return TypePropertiesTable[Ty].CompareResultType;
  llvm_unreachable("Invalid type for getCompareResultType");
  return IceType_void;
}
......@@ -275,9 +259,8 @@ SizeT getScalarIntBitWidth(Type Ty) {
// ======================== Dump routines ======================== //
/// Returns the printable name of Ty for dump output; an invalid type is a
/// program error.
const char *typeString(Type Ty) {
  if (Ty < IceType_NUM)
    return TypeAttributes[Ty].DisplayString;
  llvm_unreachable("Invalid type for typeString");
  return "???"; // unreachable fallback for release builds
}
......
......@@ -2737,7 +2737,7 @@ void FunctionParser::ProcessRecord() {
appendErrorInstruction(ReturnType);
return;
}
bool IsTailCall = static_cast<bool>(CCInfo & 1);
const bool IsTailCall = (CCInfo & 1);
// Create the call instruction.
Ice::Variable *Dest = (ReturnType == Ice::IceType_void)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment