Commit 008f4ce5 by John Porto

Subzero. X8664. Fixes filetype=asm.

Fixes filetype=asm for x8664. BUG= https://bugs.chromium.org/p/nativeclient/issues/detail?id=4077 R=stichnot@chromium.org Review URL: https://codereview.chromium.org/1543573002 .
parent 5412eb7d
......@@ -400,8 +400,8 @@ check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime
-i x8632,native,sse2 \
-i x8632,native,sse4.1,test_vector_ops \
-i x8632,sandbox,sse4.1,Om1 \
-e x8664,native,sse2 \
-e x8664,native,sse4.1,test_vector_ops \
-i x8664,native,sse2 \
-i x8664,native,sse4.1,test_vector_ops \
-e x8664,sandbox,sse4.1,Om1 \
-i arm32,neon \
-e arm32,neon,test_vector_ops \
......
......@@ -140,9 +140,21 @@ void MachineTraits<TargetX8664>::X86OperandMem::emit(const Cfg *Func) const {
if (Base || Index) {
Str << "(";
if (Base)
Base->emit(Func);
if (Base) {
const Variable *Base32 = Base;
if (Base->getType() != IceType_i32) {
// X86-64 is ILP32, but %rsp and %rbp are accessed as 64-bit registers.
// For filetype=asm, they need to be emitted as their 32-bit sibilings.
assert(Base->getType() == IceType_i64);
assert(Base->getRegNum() == RegX8664::Encoded_Reg_rsp ||
Base->getRegNum() == RegX8664::Encoded_Reg_rbp);
Base32 = Base->asType(IceType_i32, X8664::Traits::getGprForType(
IceType_i32, Base->getRegNum()));
}
Base32->emit(Func);
}
if (Index) {
assert(Index->getType() == IceType_i32);
Str << ",";
Index->emit(Func);
if (Shift)
......
......@@ -1151,6 +1151,8 @@ public:
return new (Func->allocate<InstX86Movzx>()) InstX86Movzx(Func, Dest, Src);
}
void emit(const Cfg *Func) const override;
void emitIAS(const Cfg *Func) const override;
private:
......@@ -1167,6 +1169,8 @@ public:
return new (Func->allocate<InstX86Movd>()) InstX86Movd(Func, Dest, Src);
}
void emit(const Cfg *Func) const override;
void emitIAS(const Cfg *Func) const override;
private:
......
......@@ -1266,9 +1266,11 @@ void InstX86Cbwdq<Machine>::emit(const Cfg *Func) const {
"cltd";
break;
case IceType_i64:
assert(DestReg == InstX86Base<Machine>::Traits::RegisterSet::Reg_edx);
assert(InstX86Base<Machine>::Traits::Is64Bit);
assert(SrcReg == InstX86Base<Machine>::Traits::getRaxOrDie());
assert(DestReg == InstX86Base<Machine>::Traits::getRdxOrDie());
Str << "\t"
"cdto";
"cqo";
break;
}
}
......@@ -2129,10 +2131,6 @@ template <class Machine> void InstX86Lea<Machine>::emit(const Cfg *Func) const {
this->getDest()->emit(Func);
}
// Returns true when Op is an integer constant of either 32- or 64-bit width.
inline bool isIntegerConstant(const Operand *Op) {
  if (llvm::isa<ConstantInteger32>(Op))
    return true;
  return llvm::isa<ConstantInteger64>(Op);
}
template <class Machine> void InstX86Mov<Machine>::emit(const Cfg *Func) const {
if (!BuildDefs::dump())
return;
......@@ -2142,9 +2140,10 @@ template <class Machine> void InstX86Mov<Machine>::emit(const Cfg *Func) const {
Type SrcTy = Src->getType();
Type DestTy = this->getDest()->getType();
if (InstX86Base<Machine>::Traits::Is64Bit && DestTy == IceType_i64 &&
isIntegerConstant(Src)) {
llvm::isa<ConstantInteger64>(Src)) {
Str << "\t"
"movabs\t";
"movabs"
"\t";
} else {
Str << "\t"
"mov" << (!isScalarFloatingType(DestTy)
......@@ -2214,19 +2213,14 @@ void InstX86Mov<Machine>::emitIAS(const Cfg *Func) const {
assert(isScalarIntegerType(DestTy));
// Widen DestTy for truncation (see above note). We should only do this
// when both Src and Dest are integer types.
if (InstX86Base<Machine>::Traits::Is64Bit && DestTy == IceType_i64 &&
isIntegerConstant(Src)) {
uint64_t Value = -1;
if (InstX86Base<Machine>::Traits::Is64Bit && DestTy == IceType_i64) {
if (const auto *C64 = llvm::dyn_cast<ConstantInteger64>(Src)) {
Value = C64->getValue();
} else {
Value = llvm::cast<ConstantInteger32>(Src)->getValue();
Func->getAssembler<typename InstX86Base<Machine>::Traits::Assembler>()
->movabs(InstX86Base<Machine>::Traits::getEncodedGPR(
Dest->getRegNum()),
C64->getValue());
return;
}
Func->getAssembler<typename InstX86Base<Machine>::Traits::Assembler>()
->movabs(
InstX86Base<Machine>::Traits::getEncodedGPR(Dest->getRegNum()),
Value);
return;
}
if (isScalarIntegerType(SrcTy)) {
SrcTy = DestTy;
......@@ -2260,6 +2254,30 @@ void InstX86Mov<Machine>::emitIAS(const Cfg *Func) const {
}
template <class Machine>
void InstX86Movd<Machine>::emit(const Cfg *Func) const {
  if (!BuildDefs::dump())
    return;
  assert(this->getSrcSize() == 1);
  Variable *const Dst = this->getDest();
  Operand *const Source = this->getSrc(0);
  const bool HasI64Operand =
      Dst->getType() == IceType_i64 || Source->getType() == IceType_i64;
  if (!HasI64Operand) {
    // Neither operand is i64: the generic xmm unaryop emitter handles it.
    InstX86BaseUnaryopXmm<Machine, InstX86Base<Machine>::Movd>::emit(Func);
    return;
  }
  // A 64-bit gpr<->xmm transfer: one side is i64, the other f64, and the
  // textual form for that move is movq rather than movd.
  assert(Dst->getType() == IceType_f64 || Source->getType() == IceType_f64);
  assert(Dst->getType() != Source->getType());
  Ostream &Str = Func->getContext()->getStrEmit();
  Str << "\t"
         "movq"
         "\t";
  Source->emit(Func);
  Str << ", ";
  Dst->emit(Func);
}
template <class Machine>
void InstX86Movd<Machine>::emitIAS(const Cfg *Func) const {
typename InstX86Base<Machine>::Traits::Assembler *Asm =
Func->getAssembler<typename InstX86Base<Machine>::Traits::Assembler>();
......@@ -2359,7 +2377,8 @@ void InstX86Movq<Machine>::emit(const Cfg *Func) const {
assert(this->getDest()->getType() == IceType_i64 ||
this->getDest()->getType() == IceType_f64);
Str << "\t"
"movq\t";
"movq"
"\t";
this->getSrc(0)->emit(Func);
Str << ", ";
this->getDest()->emit(Func);
......@@ -2412,6 +2431,33 @@ void InstX86Movsx<Machine>::emitIAS(const Cfg *Func) const {
}
template <class Machine>
void InstX86Movzx<Machine>::emit(const Cfg *Func) const {
  if (!BuildDefs::dump())
    return;
  if (InstX86Base<Machine>::Traits::Is64Bit) {
    // There's no movzx %eXX, %rXX. To zero extend 32- to 64-bits, we emit a
    // mov %eXX, %eXX. The processor will still do a movzx[bw]q.
    assert(this->getSrcSize() == 1);
    const Operand *Source = this->getSrc(0);
    const Variable *Dst = this->Dest;
    const bool ZeroExtend32To64 =
        Source->getType() == IceType_i32 && Dst->getType() == IceType_i64;
    if (ZeroExtend32To64) {
      Ostream &Str = Func->getContext()->getStrEmit();
      Str << "\t"
             "mov"
             "\t";
      Source->emit(Func);
      Str << ", ";
      // Emit the destination as its 32-bit sibling register.
      Dst->asType(IceType_i32, InstX86Base<Machine>::Traits::getGprForType(
                                   IceType_i32, Dst->getRegNum()))
          ->emit(Func);
      Str << " /* movzx */";
      return;
    }
  }
  InstX86BaseUnaryopGPR<Machine, InstX86Base<Machine>::Movzx>::emit(Func);
}
template <class Machine>
void InstX86Movzx<Machine>::emitIAS(const Cfg *Func) const {
assert(this->getSrcSize() == 1);
const Variable *Dest = this->getDest();
......
......@@ -118,20 +118,22 @@ getRegisterForXmmArgNum(uint32_t ArgNum) {
}
static inline TargetX8664::Traits::RegisterSet::AllRegisters
getRegisterForGprArgNum(uint32_t ArgNum) {
getRegisterForGprArgNum(Type Ty, uint32_t ArgNum) {
assert(ArgNum < TargetX8664::Traits::X86_MAX_GPR_ARGS);
static const TargetX8664::Traits::RegisterSet::AllRegisters GprForArgNum[] = {
TargetX8664::Traits::RegisterSet::Reg_edi,
TargetX8664::Traits::RegisterSet::Reg_esi,
TargetX8664::Traits::RegisterSet::Reg_edx,
TargetX8664::Traits::RegisterSet::Reg_ecx,
TargetX8664::Traits::RegisterSet::Reg_r8d,
TargetX8664::Traits::RegisterSet::Reg_r9d,
TargetX8664::Traits::RegisterSet::Reg_rdi,
TargetX8664::Traits::RegisterSet::Reg_rsi,
TargetX8664::Traits::RegisterSet::Reg_rdx,
TargetX8664::Traits::RegisterSet::Reg_rcx,
TargetX8664::Traits::RegisterSet::Reg_r8,
TargetX8664::Traits::RegisterSet::Reg_r9,
};
static_assert(llvm::array_lengthof(GprForArgNum) ==
TargetX8664::TargetX8664::Traits::X86_MAX_GPR_ARGS,
"Mismatch between MAX_GPR_ARGS and GprForArgNum.");
return GprForArgNum[ArgNum];
assert(Ty == IceType_i64 || Ty == IceType_i32);
return static_cast<TargetX8664::Traits::RegisterSet::AllRegisters>(
TargetX8664::Traits::getGprForType(Ty, GprForArgNum[ArgNum]));
}
// constexprMax returns a (constexpr) max(S0, S1), and it is used for defining
......@@ -168,7 +170,7 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
llvm::SmallVector<Operand *, constexprMax(Traits::X86_MAX_XMM_ARGS,
Traits::X86_MAX_GPR_ARGS)>;
OperandList XmmArgs;
OperandList GprArgs;
CfgVector<std::pair<const Type, Operand *>> GprArgs;
OperandList StackArgs, StackArgLocations;
int32_t ParameterAreaSizeBytes = 0;
......@@ -186,14 +188,15 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
XmmArgs.push_back(Arg);
} else if (isScalarIntegerType(Ty) &&
GprArgs.size() < Traits::X86_MAX_GPR_ARGS) {
GprArgs.push_back(Arg);
GprArgs.emplace_back(Ty, Arg);
} else {
StackArgs.push_back(Arg);
if (isVectorType(Arg->getType())) {
ParameterAreaSizeBytes =
Traits::applyStackAlignment(ParameterAreaSizeBytes);
}
Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_rsp);
Variable *esp =
getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64);
Constant *Loc = Ctx->getConstantInt32(ParameterAreaSizeBytes);
StackArgLocations.push_back(
Traits::X86OperandMem::create(Func, Ty, esp, Loc));
......@@ -230,7 +233,29 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
}
for (SizeT i = 0, NumGprArgs = GprArgs.size(); i < NumGprArgs; ++i) {
Variable *Reg = legalizeToReg(GprArgs[i], getRegisterForGprArgNum(i));
const Type SignatureTy = GprArgs[i].first;
Operand *Arg = GprArgs[i].second;
Variable *Reg =
legalizeToReg(Arg, getRegisterForGprArgNum(Arg->getType(), i));
assert(SignatureTy == IceType_i64 || SignatureTy == IceType_i32);
if (SignatureTy != Arg->getType()) {
if (SignatureTy == IceType_i32) {
assert(Arg->getType() == IceType_i64);
Variable *T = makeReg(
IceType_i32, Traits::getGprForType(IceType_i32, Reg->getRegNum()));
_mov(T, Reg);
Reg = T;
} else {
// This branch has never been reached, so we leave the assert(false)
// here until we figure out how to exercise it.
assert(false);
assert(Arg->getType() == IceType_i32);
Variable *T = makeReg(
IceType_i64, Traits::getGprForType(IceType_i64, Reg->getRegNum()));
_movzx(T, Reg);
Reg = T;
}
}
Context.insert<InstFakeUse>(Reg);
}
......@@ -248,10 +273,15 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
case IceType_i1:
case IceType_i8:
case IceType_i16:
// The bitcode should never return an i1, i8, or i16.
assert(false);
// Fallthrough intended.
case IceType_i32:
case IceType_i64:
ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_eax);
break;
case IceType_i64:
ReturnReg = makeReg(Dest->getType(), Traits::RegisterSet::Reg_rax);
break;
case IceType_f32:
case IceType_f64:
case IceType_v4i1:
......@@ -267,6 +297,13 @@ void TargetX8664::lowerCall(const InstCall *Instr) {
}
Operand *CallTarget = legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm);
if (auto *CallTargetR = llvm::dyn_cast<Variable>(CallTarget)) {
// x86-64 in Subzero is ILP32. Therefore, CallTarget is i32, but the emitted
// call needs a i64 register (for textual asm.)
Variable *T = makeReg(IceType_i64);
_movzx(T, CallTargetR);
CallTarget = T;
}
const bool NeedSandboxing = Ctx->getFlags().getUseSandboxing();
if (NeedSandboxing) {
llvm_unreachable("X86-64 Sandboxing codegen not implemented.");
......@@ -330,7 +367,7 @@ void TargetX8664::lowerArguments() {
if (NumGprArgs >= Traits::X86_MAX_GPR_ARGS) {
continue;
}
RegNum = getRegisterForGprArgNum(NumGprArgs);
RegNum = getRegisterForGprArgNum(Ty, NumGprArgs);
++NumGprArgs;
RegisterArg = Func->makeVariable(Ty);
}
......@@ -359,7 +396,8 @@ void TargetX8664::lowerRet(const InstRet *Inst) {
Reg = legalizeToReg(Src0, Traits::RegisterSet::Reg_xmm0);
} else {
assert(isScalarIntegerType(Src0->getType()));
_mov(Reg, Src0, Traits::RegisterSet::Reg_eax);
_mov(Reg, Src0, Traits::getGprForType(Src0->getType(),
Traits::RegisterSet::Reg_rax));
}
}
// Add a ret instruction even if sandboxing is enabled, because addEpilog
......@@ -479,8 +517,10 @@ void TargetX8664::addProlog(CfgNode *Node) {
assert((RegsUsed & getRegisterSet(RegSet_FramePointer, RegSet_None))
.count() == 0);
PreservedRegsSizeBytes += typeWidthInBytes(IceType_i64);
Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_rbp);
Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_rsp);
Variable *ebp =
getPhysicalRegister(Traits::RegisterSet::Reg_rbp, IceType_i64);
Variable *esp =
getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64);
_push(ebp);
_mov(ebp, esp);
// Keep ebp live for late-stage liveness analysis (e.g. asm-verbose mode).
......@@ -528,7 +568,7 @@ void TargetX8664::addProlog(CfgNode *Node) {
if (PrologEmitsFixedAllocas &&
FixedAllocaAlignBytes > Traits::X86_STACK_ALIGNMENT_BYTES) {
assert(IsEbpBasedFrame);
_and(getPhysicalRegister(Traits::RegisterSet::Reg_rsp),
_and(getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64),
Ctx->getConstantInt32(-FixedAllocaAlignBytes));
}
}
......@@ -542,7 +582,8 @@ void TargetX8664::addProlog(CfgNode *Node) {
// Fill in stack offsets for stack args, and copy args into registers for
// those that were register-allocated. Args are pushed right to left, so
// Arg[0] is closest to the stack/frame pointer.
Variable *FramePtr = getPhysicalRegister(getFrameOrStackReg());
Variable *FramePtr =
getPhysicalRegister(getFrameOrStackReg(), Traits::WordType);
size_t BasicFrameOffset =
PreservedRegsSizeBytes + Traits::X86_RET_IP_SIZE_BYTES;
if (!IsEbpBasedFrame)
......@@ -637,9 +678,11 @@ void TargetX8664::addEpilog(CfgNode *Node) {
Context.init(Node);
Context.setInsertPoint(InsertPoint);
Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_rsp);
Variable *esp =
getPhysicalRegister(Traits::RegisterSet::Reg_rsp, IceType_i64);
if (IsEbpBasedFrame) {
Variable *ebp = getPhysicalRegister(Traits::RegisterSet::Reg_rbp);
Variable *ebp =
getPhysicalRegister(Traits::RegisterSet::Reg_rbp, IceType_i64);
// For late-stage liveness analysis (e.g. asm-verbose mode), adding a fake
// use of esp before the assignment of esp=ebp keeps previous esp
// adjustments from being dead-code eliminated.
......
......@@ -382,7 +382,73 @@ template <> struct MachineTraits<TargetX8664> {
return BaseRegs[RegNum];
}
// Identity mapping: this variant ignores the type and returns RegNum as-is.
static int32_t getGprForType(Type, int32_t RegNum) { return RegNum; }
private:
// Maps a scalar integer type to the rAX-family register of matching width
// (al/ax/eax/rax), used as the base for re-basing register numbers by size.
static int32_t getFirstGprForType(Type Ty) {
  switch (Ty) {
  case IceType_i1:
  case IceType_i8:
    return RegisterSet::Reg_al;
  case IceType_i16:
    return RegisterSet::Reg_ax;
  case IceType_i32:
    return RegisterSet::Reg_eax;
  case IceType_i64:
    return RegisterSet::Reg_rax;
  default:
    llvm_unreachable("Invalid type for GPR.");
  }
}
public:
// Returns the GPR sibling of RegNum whose width matches the scalar integer
// type Ty (e.g., (IceType_i32, Reg_rax) -> Reg_eax). Non-integer types are
// returned unchanged.
static int32_t getGprForType(Type Ty, int32_t RegNum) {
assert(RegNum != Variable::NoRegister);
if (!isScalarIntegerType(Ty)) {
return RegNum;
}
assert(Ty == IceType_i1 || Ty == IceType_i8 || Ty == IceType_i16 ||
Ty == IceType_i32 || Ty == IceType_i64);
// %ah is only ever requested as an i8 register; it has no wider sibling
// reachable by the re-basing arithmetic below, so return it unchanged.
if (RegNum == RegisterSet::Reg_ah) {
assert(Ty == IceType_i8);
return RegNum;
}
// The other high-byte registers are never expected here.
assert(RegNum != RegisterSet::Reg_bh);
assert(RegNum != RegisterSet::Reg_ch);
assert(RegNum != RegisterSet::Reg_dh);
const int32_t FirstGprForType = getFirstGprForType(Ty);
// Each expansion of X below handles one register: it determines the first
// register of RegNum's own size class, then re-bases RegNum onto the size
// class for Ty. The getBaseReg assert checks both registers share a base.
// (No comments inside the macro: they would swallow the '\' continuations.)
switch (RegNum) {
default:
llvm::report_fatal_error("Unknown register.");
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, is16To8, \
isTrunc8Rcvr, isAhRcvr, aliases) \
case RegisterSet::val: { \
assert(isGPR); \
assert((is64) || (is32) || (is16) || (is8) || \
getBaseReg(RegisterSet::val) == RegisterSet::Reg_rsp); \
constexpr int32_t FirstGprWithRegNumSize = \
((is64) || RegisterSet::val == RegisterSet::Reg_rsp) \
? RegisterSet::Reg_rax \
: (((is32) || RegisterSet::val == RegisterSet::Reg_esp) \
? RegisterSet::Reg_eax \
: (((is16) || RegisterSet::val == RegisterSet::Reg_sp) \
? RegisterSet::Reg_ax \
: RegisterSet::Reg_al)); \
const int32_t NewRegNum = \
RegNum - FirstGprWithRegNumSize + FirstGprForType; \
assert(getBaseReg(RegNum) == getBaseReg(NewRegNum) && \
"Error involving " #val); \
return NewRegNum; \
}
REGX8664_TABLE
#undef X
}
}
static void initRegisterSet(
std::array<llvm::SmallBitVector, RCX86_NUM> *TypeToRegisterSet,
......
......@@ -222,7 +222,7 @@ protected:
void lowerMemset(Operand *Dest, Operand *Val, Operand *Count);
/// Lower an indirect jump adding sandboxing when needed.
void lowerIndirectJump(Variable *Target);
void lowerIndirectJump(Variable *JumpTarget);
/// Check the comparison is in [Min,Max]. The flags register will be modified
/// with:
......@@ -249,7 +249,8 @@ protected:
/// Emit a fake use of esp to make sure esp stays alive for the entire
/// function. Otherwise some esp adjustments get dead-code eliminated.
void keepEspLiveAtExit() {
Variable *esp = Func->getTarget()->getPhysicalRegister(getStackReg());
Variable *esp =
Func->getTarget()->getPhysicalRegister(getStackReg(), Traits::WordType);
Context.insert<InstFakeUse>(esp);
}
......
......@@ -782,12 +782,13 @@ Variable *TargetX86Base<Machine>::getPhysicalRegister(SizeT RegNum, Type Ty) {
// Don't bother tracking the live range of a named physical register.
Reg->setIgnoreLiveness();
}
assert(Traits::getGprForType(Ty, RegNum) == static_cast<int32_t>(RegNum));
return Reg;
}
template <class Machine>
IceString TargetX86Base<Machine>::getRegName(SizeT RegNum, Type) const {
return Traits::getRegName(RegNum);
IceString TargetX86Base<Machine>::getRegName(SizeT RegNum, Type Ty) const {
return Traits::getRegName(Traits::getGprForType(Ty, RegNum));
}
template <class Machine>
......@@ -994,7 +995,7 @@ void TargetX86Base<Machine>::lowerAlloca(const InstAlloca *Inst) {
if (UseFramePointer)
setHasFramePointer();
Variable *esp = getPhysicalRegister(getStackReg());
Variable *esp = getPhysicalRegister(getStackReg(), Traits::WordType);
if (OverAligned) {
_and(esp, Ctx->getConstantInt32(-Alignment));
}
......@@ -1019,8 +1020,12 @@ void TargetX86Base<Machine>::lowerAlloca(const InstAlloca *Inst) {
} else {
// Non-constant sizes need to be adjusted to the next highest multiple of
// the required alignment at runtime.
Variable *T = makeReg(IceType_i32);
_mov(T, TotalSize);
Variable *T = makeReg(Traits::WordType);
if (Traits::Is64Bit && TotalSize->getType() != IceType_i64) {
_movzx(T, TotalSize);
} else {
_mov(T, TotalSize);
}
_add(T, Ctx->getConstantInt32(Alignment - 1));
_and(T, Ctx->getConstantInt32(-Alignment));
_sub(esp, T);
......@@ -1717,6 +1722,7 @@ void TargetX86Base<Machine>::lowerArithmetic(const InstArithmetic *Inst) {
case IceType_i64:
Eax = Traits::getRaxOrDie();
Edx = Traits::getRdxOrDie();
break;
case IceType_i32:
Eax = Traits::RegisterSet::Reg_eax;
Edx = Traits::RegisterSet::Reg_edx;
......@@ -1730,8 +1736,9 @@ void TargetX86Base<Machine>::lowerArithmetic(const InstArithmetic *Inst) {
Edx = Traits::RegisterSet::Reg_ah;
break;
}
T_edx = makeReg(Ty, Edx);
_mov(T, Src0, Eax);
_mov(T_edx, Ctx->getConstantZero(Ty), Edx);
_mov(T_edx, Ctx->getConstantZero(Ty));
_div(T, Src1, T_edx);
_mov(Dest, T);
} break;
......@@ -2309,8 +2316,6 @@ void TargetX86Base<Machine>::lowerCast(const InstCast *Inst) {
case IceType_i64: {
assert(Src0->getType() == IceType_f64);
if (Traits::Is64Bit) {
// Movd requires its fp argument (in this case, the bitcast source) to
// be an xmm register.
Variable *Src0R = legalizeToReg(Src0);
Variable *T = makeReg(IceType_i64);
_movd(T, Src0R);
......@@ -2356,8 +2361,6 @@ void TargetX86Base<Machine>::lowerCast(const InstCast *Inst) {
if (Traits::Is64Bit) {
Operand *Src0RM = legalize(Src0, Legal_Reg | Legal_Mem);
Variable *T = makeReg(IceType_f64);
// Movd requires its fp argument (in this case, the bitcast
// destination) to be an xmm register.
_movd(T, Src0RM);
_mov(Dest, T);
} else {
......@@ -3551,14 +3554,18 @@ void TargetX86Base<Machine>::lowerIntrinsicCall(
return;
}
case Intrinsics::Stacksave: {
Variable *esp = Func->getTarget()->getPhysicalRegister(getStackReg());
Variable *esp =
Func->getTarget()->getPhysicalRegister(getStackReg(), Traits::WordType);
Variable *Dest = Instr->getDest();
_mov(Dest, esp);
return;
}
case Intrinsics::Stackrestore: {
Variable *esp = Func->getTarget()->getPhysicalRegister(getStackReg());
_redefined(_mov(esp, Instr->getArg(0)));
Operand *Src = Instr->getArg(0);
const Type SrcTy = Src->getType();
Variable *esp = Func->getTarget()->getPhysicalRegister(
Traits::getGprForType(SrcTy, getStackReg()), SrcTy);
_redefined(_mov(esp, Src));
return;
}
case Intrinsics::Trap:
......@@ -4261,15 +4268,20 @@ void TargetX86Base<Machine>::lowerMemset(Operand *Dest, Operand *Val,
}
template <class Machine>
void TargetX86Base<Machine>::lowerIndirectJump(Variable *Target) {
void TargetX86Base<Machine>::lowerIndirectJump(Variable *JumpTarget) {
const bool NeedSandboxing = Ctx->getFlags().getUseSandboxing();
if (Traits::Is64Bit) {
Variable *T = makeReg(IceType_i64);
_movzx(T, JumpTarget);
JumpTarget = T;
}
if (NeedSandboxing) {
_bundle_lock();
const SizeT BundleSize =
1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
_and(Target, Ctx->getConstantInt32(~(BundleSize - 1)));
_and(JumpTarget, Ctx->getConstantInt32(~(BundleSize - 1)));
}
_jmp(Target);
_jmp(JumpTarget);
if (NeedSandboxing)
_bundle_unlock();
}
......@@ -4671,7 +4683,7 @@ void TargetX86Base<Machine>::doMockBoundsCheck(Operand *Opnd) {
// We use lowerStore() to copy out-args onto the stack. This creates a memory
// operand with the stack pointer as the base register. Don't do bounds
// checks on that.
if (Var->getRegNum() == Traits::RegisterSet::Reg_esp)
if (Var->getRegNum() == static_cast<int32_t>(getStackReg()))
return;
auto *Label = Traits::Insts::Label::create(Func, this);
......@@ -5981,8 +5993,10 @@ Operand *TargetX86Base<Machine>::legalize(Operand *From, LegalMask Allowed,
// register in x86-64.
if (Traits::Is64Bit) {
if (llvm::isa<ConstantInteger64>(Const)) {
Variable *V = copyToReg(Const, RegNum);
return V;
if (RegNum != Variable::NoRegister) {
assert(Traits::getGprForType(IceType_i64, RegNum) == RegNum);
}
return copyToReg(Const, RegNum);
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment