Commit 56958cb3 by John Porto

Subzero. X8664. NaCl Sandboxing.

parent b19d39cc
...@@ -134,10 +134,15 @@ ifdef FORCEASM ...@@ -134,10 +134,15 @@ ifdef FORCEASM
# resulting nexe. So we just disable those tests for now. # resulting nexe. So we just disable those tests for now.
FORCEASM_XTEST_EXCLUDES = -e x8632,sandbox,test_sync_atomic FORCEASM_XTEST_EXCLUDES = -e x8632,sandbox,test_sync_atomic
FORCEASM_LIT_PARAM = --param=FORCEASM FORCEASM_LIT_PARAM = --param=FORCEASM
# x86 sandboxing lit tests are disabled because llvm-mc uses different
# relocations for pushing return addresses onto the stack.
# TODO(jpp): fix this.
FORCEASM_LIT_TEST_EXCLUDES = --filter='^(?!.*/x86/sandboxing.ll).*'
else else
FORCEASM_FLAG = FORCEASM_FLAG =
FORCEASM_XTEST_EXCLUDES = FORCEASM_XTEST_EXCLUDES =
FORCEASM_LIT_PARAM = FORCEASM_LIT_PARAM =
FORCEASM_LIT_TEST_EXCLUDES =
endif endif
SB_OBJDIR := $(OBJDIR)+Sandboxed SB_OBJDIR := $(OBJDIR)+Sandboxed
...@@ -410,7 +415,8 @@ runtime.is.built: $(RT_SRC) pydir/build-runtime.py ...@@ -410,7 +415,8 @@ runtime.is.built: $(RT_SRC) pydir/build-runtime.py
check-lit: $(OBJDIR)/pnacl-sz make_symlink check-lit: $(OBJDIR)/pnacl-sz make_symlink
PNACL_BIN_PATH=$(PNACL_BIN_PATH) \ PNACL_BIN_PATH=$(PNACL_BIN_PATH) \
$(LLVM_SRC_PATH)/utils/lit/lit.py -sv tests_lit $(FORCEASM_LIT_PARAM) $(LLVM_SRC_PATH)/utils/lit/lit.py -sv tests_lit \
$(FORCEASM_LIT_TEST_EXCLUDES) $(FORCEASM_LIT_PARAM)
ifdef MINIMAL ifdef MINIMAL
check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime
...@@ -419,8 +425,6 @@ else ...@@ -419,8 +425,6 @@ else
check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime
# Do all native/sse2 tests, but only test_vector_ops for native/sse4.1. # Do all native/sse2 tests, but only test_vector_ops for native/sse4.1.
# For (slow) sandboxed tests, limit to Om1/sse4.1. # For (slow) sandboxed tests, limit to Om1/sse4.1.
# TODO(jpp): implement x8664 sandbox, then enable xtests.
# TODO(jpp): reenable the x86-64 tests.
./pydir/crosstest_generator.py -v --lit \ ./pydir/crosstest_generator.py -v --lit \
--toolchain-root $(TOOLCHAIN_ROOT) \ --toolchain-root $(TOOLCHAIN_ROOT) \
$(FORCEASM_FLAG) \ $(FORCEASM_FLAG) \
...@@ -431,7 +435,7 @@ check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime ...@@ -431,7 +435,7 @@ check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime
-i x8632,nonsfi,sse2,O2 \ -i x8632,nonsfi,sse2,O2 \
-i x8664,native,sse2 \ -i x8664,native,sse2 \
-i x8664,native,sse4.1,test_vector_ops \ -i x8664,native,sse4.1,test_vector_ops \
-e x8664,sandbox,sse4.1,Om1 \ -i x8664,sandbox,sse4.1,Om1 \
-i arm32,neon \ -i arm32,neon \
-e arm32,nonsfi \ -e arm32,nonsfi \
-e arm32,neon,test_vector_ops \ -e arm32,neon,test_vector_ops \
......
...@@ -15,6 +15,8 @@ def TargetAssemblerFlags(target, sandboxed): ...@@ -15,6 +15,8 @@ def TargetAssemblerFlags(target, sandboxed):
# TODO(reed kotler). Need to find out exactly what we need to # TODO(reed kotler). Need to find out exactly what we need to
# add here for Mips32. # add here for Mips32.
flags = { 'x8632': ['-triple=%s' % ('i686-nacl' if sandboxed else 'i686')], flags = { 'x8632': ['-triple=%s' % ('i686-nacl' if sandboxed else 'i686')],
'x8664': ['-triple=%s' % (
'x86_64-nacl' if sandboxed else 'x86_64')],
'arm32': ['-triple=%s' % ( 'arm32': ['-triple=%s' % (
'armv7a-nacl' if sandboxed else 'armv7a'), 'armv7a-nacl' if sandboxed else 'armv7a'),
'-mcpu=cortex-a9', '-mattr=+neon'], '-mcpu=cortex-a9', '-mattr=+neon'],
...@@ -24,6 +26,7 @@ def TargetAssemblerFlags(target, sandboxed): ...@@ -24,6 +26,7 @@ def TargetAssemblerFlags(target, sandboxed):
def TargetDisassemblerFlags(target): def TargetDisassemblerFlags(target):
flags = { 'x8632': ['-Mintel'], flags = { 'x8632': ['-Mintel'],
'x8664': ['-Mintel'],
'arm32': [], 'arm32': [],
'mips32':[] } 'mips32':[] }
return flags[target] return flags[target]
...@@ -83,7 +86,7 @@ def main(): ...@@ -83,7 +86,7 @@ def main():
argparser.add_argument('--forceasm', required=False, action='store_true', argparser.add_argument('--forceasm', required=False, action='store_true',
help='Force --filetype=asm') help='Force --filetype=asm')
argparser.add_argument('--target', default='x8632', dest='target', argparser.add_argument('--target', default='x8632', dest='target',
choices=['x8632','arm32','mips32'], choices=['x8632','x8664','arm32','mips32'],
help='Target architecture. Default %(default)s') help='Target architecture. Default %(default)s')
argparser.add_argument('--echo-cmd', required=False, argparser.add_argument('--echo-cmd', required=False,
action='store_true', action='store_true',
......
...@@ -103,7 +103,7 @@ def AddOptionalArgs(argparser): ...@@ -103,7 +103,7 @@ def AddOptionalArgs(argparser):
help='Run only post-Subzero build steps') help='Run only post-Subzero build steps')
def LinkSandbox(objs, exe, target, verbose=True): def LinkSandbox(objs, exe, target, verbose=True):
assert target in ('x8632', 'arm32'), \ assert target in ('x8632', 'x8664', 'arm32'), \
'-sandbox is not available for %s' % target '-sandbox is not available for %s' % target
nacl_root = FindBaseNaCl() nacl_root = FindBaseNaCl()
gold = ('{root}/toolchain/linux_x86/pnacl_newlib_raw/bin/' + gold = ('{root}/toolchain/linux_x86/pnacl_newlib_raw/bin/' +
...@@ -111,6 +111,7 @@ def LinkSandbox(objs, exe, target, verbose=True): ...@@ -111,6 +111,7 @@ def LinkSandbox(objs, exe, target, verbose=True):
target_lib_dir = { target_lib_dir = {
'arm32': 'arm', 'arm32': 'arm',
'x8632': 'x86-32', 'x8632': 'x86-32',
'x8664': 'x86-64',
}[target] }[target]
linklib = ('{root}/toolchain/linux_x86/pnacl_newlib_raw/translator/' + linklib = ('{root}/toolchain/linux_x86/pnacl_newlib_raw/translator/' +
'{target_dir}/lib').format(root=nacl_root, '{target_dir}/lib').format(root=nacl_root,
...@@ -417,7 +418,7 @@ def ProcessPexe(args, pexe, exe): ...@@ -417,7 +418,7 @@ def ProcessPexe(args, pexe, exe):
emulation = { emulation = {
'arm32': 'armelf_linux_eabi', 'arm32': 'armelf_linux_eabi',
'x8632': 'elf_i386', 'x8632': 'elf_i386',
'x8664': 'elf32_x86_64', 'x8664': 'elf32_x86_64' if not args.sandbox else 'elf_x86_64',
}[args.target] }[args.target]
shellcmd(( shellcmd((
'{ld} -r -m {emulation} -o {partial} {sz} {llc}' '{ld} -r -m {emulation} -o {partial} {sz} {llc}'
......
...@@ -44,4 +44,4 @@ ARM32Target = TargetInfo(target='arm32', ...@@ -44,4 +44,4 @@ ARM32Target = TargetInfo(target='arm32',
cross_headers=['-isystem', FindARMCrossInclude()]) cross_headers=['-isystem', FindARMCrossInclude()])
def ConvertTripleToNaCl(nonsfi_triple): def ConvertTripleToNaCl(nonsfi_triple):
return nonsfi_triple.replace('linux', 'nacl') return nonsfi_triple[:nonsfi_triple.find('-linux')] + '-nacl'
...@@ -237,6 +237,9 @@ class Assembler { ...@@ -237,6 +237,9 @@ class Assembler {
Assembler &operator=(const Assembler &) = delete; Assembler &operator=(const Assembler &) = delete;
public: public:
using InternalRelocationList =
std::vector<std::pair<const IceString, const SizeT>>;
enum AssemblerKind { enum AssemblerKind {
Asm_ARM32, Asm_ARM32,
Asm_MIPS32, Asm_MIPS32,
...@@ -323,12 +326,23 @@ public: ...@@ -323,12 +326,23 @@ public:
AssemblerKind getKind() const { return Kind; } AssemblerKind getKind() const { return Kind; }
void addRelocationAtCurrentPosition(const IceString &RelocName) {
if (!getPreliminary()) {
InternalRelocs.emplace_back(RelocName, getBufferSize());
}
}
const InternalRelocationList &getInternalRelocations() const {
return InternalRelocs;
}
protected: protected:
explicit Assembler(AssemblerKind Kind) explicit Assembler(AssemblerKind Kind)
: Kind(Kind), Allocator(), Buffer(*this) {} : Kind(Kind), Allocator(), Buffer(*this) {}
private: private:
const AssemblerKind Kind; const AssemblerKind Kind;
InternalRelocationList InternalRelocs;
ArenaAllocator<32 * 1024> Allocator; ArenaAllocator<32 * 1024> Allocator;
/// FunctionName and IsInternal are transferred from the original Cfg object, /// FunctionName and IsInternal are transferred from the original Cfg object,
......
...@@ -44,11 +44,10 @@ class AssemblerX86Base : public ::Ice::Assembler { ...@@ -44,11 +44,10 @@ class AssemblerX86Base : public ::Ice::Assembler {
AssemblerX86Base &operator=(const AssemblerX86Base &) = delete; AssemblerX86Base &operator=(const AssemblerX86Base &) = delete;
protected: protected:
explicit AssemblerX86Base(bool use_far_branches = false) explicit AssemblerX86Base(bool EmitAddrSizeOverridePrefix = Traits::Is64Bit)
: Assembler(Traits::AsmKind) { : Assembler(Traits::AsmKind),
// This mode is only needed and implemented for MIPS and ARM. EmitAddrSizeOverridePrefix(EmitAddrSizeOverridePrefix) {
assert(!use_far_branches); assert(Traits::Is64Bit || !EmitAddrSizeOverridePrefix);
(void)use_far_branches;
} }
public: public:
...@@ -288,6 +287,8 @@ public: ...@@ -288,6 +287,8 @@ public:
static const intptr_t kCallExternalLabelSize = 5; static const intptr_t kCallExternalLabelSize = 5;
void pushl(GPRRegister reg); void pushl(GPRRegister reg);
void pushl(const Immediate &Imm);
void pushl(const ConstantRelocatable *Label);
void popl(GPRRegister reg); void popl(GPRRegister reg);
void popl(const Address &address); void popl(const Address &address);
...@@ -711,6 +712,11 @@ protected: ...@@ -711,6 +712,11 @@ protected:
private: private:
ENABLE_MAKE_UNIQUE; ENABLE_MAKE_UNIQUE;
// EmitAddrSizeOverridePrefix directs the emission of the 0x67 prefix to
// force 32-bit registers when accessing memory. This is only used in native
// 64-bit.
const bool EmitAddrSizeOverridePrefix;
static constexpr Type RexTypeIrrelevant = IceType_i32; static constexpr Type RexTypeIrrelevant = IceType_i32;
static constexpr Type RexTypeForceRexW = IceType_i64; static constexpr Type RexTypeForceRexW = IceType_i64;
static constexpr GPRRegister RexRegIrrelevant = static constexpr GPRRegister RexRegIrrelevant =
...@@ -746,7 +752,7 @@ private: ...@@ -746,7 +752,7 @@ private:
Label *getOrCreateLabel(SizeT Number, LabelVector &Labels); Label *getOrCreateLabel(SizeT Number, LabelVector &Labels);
void emitAddrSizeOverridePrefix() { void emitAddrSizeOverridePrefix() {
if (!Traits::Is64Bit) { if (!Traits::Is64Bit || !EmitAddrSizeOverridePrefix) {
return; return;
} }
static constexpr uint8_t AddrSizeOverridePrefix = 0x67; static constexpr uint8_t AddrSizeOverridePrefix = 0x67;
......
...@@ -152,6 +152,25 @@ void AssemblerX86Base<TraitsType>::pushl(GPRRegister reg) { ...@@ -152,6 +152,25 @@ void AssemblerX86Base<TraitsType>::pushl(GPRRegister reg) {
} }
template <typename TraitsType> template <typename TraitsType>
void AssemblerX86Base<TraitsType>::pushl(const Immediate &Imm) {
AssemblerBuffer::EnsureCapacity ensured(&Buffer);
emitUint8(0x68);
emitInt32(Imm.value());
}
template <typename TraitsType>
void AssemblerX86Base<TraitsType>::pushl(const ConstantRelocatable *Label) {
AssemblerBuffer::EnsureCapacity ensured(&Buffer);
emitUint8(0x68);
emitFixup(this->createFixup(Traits::FK_Abs, Label));
// In x86-32, the emitted value is an addend to the relocation. Therefore, we
// must emit a 0 (because we're pushing an absolute relocation.)
// In x86-64, the emitted value does not matter (the addend lives in the
// relocation record as an extra field.)
emitInt32(0);
}
template <typename TraitsType>
void AssemblerX86Base<TraitsType>::popl(GPRRegister reg) { void AssemblerX86Base<TraitsType>::popl(GPRRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&Buffer); AssemblerBuffer::EnsureCapacity ensured(&Buffer);
// Any type that would not force a REX prefix to be emitted can be provided // Any type that would not force a REX prefix to be emitted can be provided
...@@ -382,7 +401,8 @@ template <typename TraitsType> ...@@ -382,7 +401,8 @@ template <typename TraitsType>
void AssemblerX86Base<TraitsType>::lea(Type Ty, GPRRegister dst, void AssemblerX86Base<TraitsType>::lea(Type Ty, GPRRegister dst,
const Address &src) { const Address &src) {
AssemblerBuffer::EnsureCapacity ensured(&Buffer); AssemblerBuffer::EnsureCapacity ensured(&Buffer);
assert(Ty == IceType_i16 || Ty == IceType_i32); assert(Ty == IceType_i16 || Ty == IceType_i32 ||
(Traits::Is64Bit && Ty == IceType_i64));
if (Ty == IceType_i16) if (Ty == IceType_i16)
emitOperandSizeOverride(); emitOperandSizeOverride();
emitAddrSizeOverridePrefix(); emitAddrSizeOverridePrefix();
...@@ -3140,7 +3160,6 @@ void AssemblerX86Base<TraitsType>::jmp(Label *label, bool near) { ...@@ -3140,7 +3160,6 @@ void AssemblerX86Base<TraitsType>::jmp(Label *label, bool near) {
template <typename TraitsType> template <typename TraitsType>
void AssemblerX86Base<TraitsType>::jmp(const ConstantRelocatable *label) { void AssemblerX86Base<TraitsType>::jmp(const ConstantRelocatable *label) {
llvm::report_fatal_error("Untested - please verify and then reenable.");
AssemblerBuffer::EnsureCapacity ensured(&Buffer); AssemblerBuffer::EnsureCapacity ensured(&Buffer);
emitUint8(0xE9); emitUint8(0xE9);
emitFixup(this->createFixup(Traits::FK_PcRel, label)); emitFixup(this->createFixup(Traits::FK_PcRel, label));
...@@ -3307,22 +3326,23 @@ void AssemblerX86Base<TraitsType>::align(intptr_t alignment, intptr_t offset) { ...@@ -3307,22 +3326,23 @@ void AssemblerX86Base<TraitsType>::align(intptr_t alignment, intptr_t offset) {
} }
template <typename TraitsType> template <typename TraitsType>
void AssemblerX86Base<TraitsType>::bind(Label *label) { void AssemblerX86Base<TraitsType>::bind(Label *L) {
intptr_t bound = Buffer.size(); const intptr_t Bound = Buffer.size();
assert(!label->isBound()); // Labels can only be bound once. assert(!L->isBound()); // Labels can only be bound once.
while (label->isLinked()) { while (L->isLinked()) {
intptr_t position = label->getLinkPosition(); const intptr_t Position = L->getLinkPosition();
intptr_t next = Buffer.load<int32_t>(position); const intptr_t Next = Buffer.load<int32_t>(Position);
Buffer.store<int32_t>(position, bound - (position + 4)); const intptr_t Offset = Bound - (Position + 4);
label->Position = next; Buffer.store<int32_t>(Position, Offset);
L->Position = Next;
} }
while (label->hasNear()) { while (L->hasNear()) {
intptr_t position = label->getNearPosition(); intptr_t Position = L->getNearPosition();
intptr_t offset = bound - (position + 1); const intptr_t Offset = Bound - (Position + 1);
assert(Utils::IsInt(8, offset)); assert(Utils::IsInt(8, Offset));
Buffer.store<int8_t>(position, offset); Buffer.store<int8_t>(Position, Offset);
} }
label->bindTo(bound); L->bindTo(Bound);
} }
template <typename TraitsType> template <typename TraitsType>
......
...@@ -1107,6 +1107,11 @@ public: ...@@ -1107,6 +1107,11 @@ public:
return llvm::cast<InstBundleLock>(getBundleLockStart())->getOption() == return llvm::cast<InstBundleLock>(getBundleLockStart())->getOption() ==
InstBundleLock::Opt_AlignToEnd; InstBundleLock::Opt_AlignToEnd;
} }
bool isPadToEnd() const {
assert(isInBundleLockRegion());
return llvm::cast<InstBundleLock>(getBundleLockStart())->getOption() ==
InstBundleLock::Opt_PadToEnd;
}
// Check whether the entire bundle_lock region falls within the same bundle. // Check whether the entire bundle_lock region falls within the same bundle.
bool isSameBundle() const { bool isSameBundle() const {
assert(isInBundleLockRegion()); assert(isInBundleLockRegion());
...@@ -1172,7 +1177,17 @@ public: ...@@ -1172,7 +1177,17 @@ public:
} }
} }
} }
// Update bookkeeping when rolling back for the second pass. // If pad_to_end is specified, add padding such that the first instruction
// after the instruction sequence starts at a bundle boundary.
void padForPadToEnd() {
assert(isInBundleLockRegion());
if (isPadToEnd()) {
if (intptr_t Offset = getPostAlignment()) {
Asm->padWithNop(BundleSize - Offset);
SizeSnapshotPre = Asm->getBufferSize();
}
}
} // Update bookkeeping when rolling back for the second pass.
void rollback() { void rollback() {
assert(isInBundleLockRegion()); assert(isInBundleLockRegion());
Asm->setBufferSize(SizeSnapshotPre); Asm->setBufferSize(SizeSnapshotPre);
...@@ -1261,6 +1276,7 @@ void CfgNode::emitIAS(Cfg *Func) const { ...@@ -1261,6 +1276,7 @@ void CfgNode::emitIAS(Cfg *Func) const {
// If align_to_end is specified, make sure the next instruction begins // If align_to_end is specified, make sure the next instruction begins
// the bundle. // the bundle.
assert(!Helper.isAlignToEnd() || Helper.getPostAlignment() == 0); assert(!Helper.isAlignToEnd() || Helper.getPostAlignment() == 0);
Helper.padForPadToEnd();
Helper.leaveBundleLockRegion(); Helper.leaveBundleLockRegion();
Retrying = false; Retrying = false;
} else { } else {
......
...@@ -30,11 +30,11 @@ namespace Ice { ...@@ -30,11 +30,11 @@ namespace Ice {
namespace { namespace {
struct { constexpr struct {
bool IsELF64; bool IsELF64;
uint16_t ELFMachine; uint16_t ELFMachine;
uint32_t ELFFlags; uint32_t ELFFlags;
} ELFTargetInfo[] = { } ELFTargetInfo[TargetArch_NUM] = {
#define X(tag, str, is_elf64, e_machine, e_flags) \ #define X(tag, str, is_elf64, e_machine, e_flags) \
{ is_elf64, e_machine, e_flags } \ { is_elf64, e_machine, e_flags } \
, ,
...@@ -42,11 +42,19 @@ struct { ...@@ -42,11 +42,19 @@ struct {
#undef X #undef X
}; };
bool isELF64(TargetArch Arch) { bool isELF64(const ClFlags &Flags) {
if (Arch < TargetArch_NUM) const TargetArch Arch = Flags.getTargetArch();
return ELFTargetInfo[Arch].IsELF64; if (Arch >= TargetArch_NUM) {
llvm_unreachable("Invalid target arch for isELF64"); llvm_unreachable("Invalid target arch for isELF64");
return false; return false;
}
if (!Flags.getUseSandboxing()) {
// Unsandboxed code is always ELF32 (pexes are ILP32.)
return false;
}
return ELFTargetInfo[Arch].IsELF64;
} }
uint16_t getELFMachine(TargetArch Arch) { uint16_t getELFMachine(TargetArch Arch) {
...@@ -66,7 +74,7 @@ uint32_t getELFFlags(TargetArch Arch) { ...@@ -66,7 +74,7 @@ uint32_t getELFFlags(TargetArch Arch) {
} // end of anonymous namespace } // end of anonymous namespace
ELFObjectWriter::ELFObjectWriter(GlobalContext &Ctx, ELFStreamer &Out) ELFObjectWriter::ELFObjectWriter(GlobalContext &Ctx, ELFStreamer &Out)
: Ctx(Ctx), Str(Out), ELF64(isELF64(Ctx.getFlags().getTargetArch())) { : Ctx(Ctx), Str(Out), ELF64(isELF64(Ctx.getFlags())) {
// Create the special bookkeeping sections now. // Create the special bookkeeping sections now.
const IceString NullSectionName(""); const IceString NullSectionName("");
NullSection = new (Ctx.allocate<ELFSection>()) NullSection = new (Ctx.allocate<ELFSection>())
...@@ -231,10 +239,10 @@ void ELFObjectWriter::writeFunctionCode(const IceString &FuncName, ...@@ -231,10 +239,10 @@ void ELFObjectWriter::writeFunctionCode(const IceString &FuncName,
Section = TextSections[0]; Section = TextSections[0];
RelSection = RelTextSections[0]; RelSection = RelTextSections[0];
} }
RelocOffsetT OffsetInSection = Section->getCurrentSize(); const RelocOffsetT OffsetInSection = Section->getCurrentSize();
// Function symbols are set to 0 size in the symbol table, in contrast to // Function symbols are set to 0 size in the symbol table, in contrast to
// data symbols which have a proper size. // data symbols which have a proper size.
SizeT SymbolSize = 0; constexpr SizeT SymbolSize = 0;
Section->appendData(Str, Asm->getBufferView()); Section->appendData(Str, Asm->getBufferView());
uint8_t SymbolType; uint8_t SymbolType;
uint8_t SymbolBinding; uint8_t SymbolBinding;
...@@ -248,6 +256,15 @@ void ELFObjectWriter::writeFunctionCode(const IceString &FuncName, ...@@ -248,6 +256,15 @@ void ELFObjectWriter::writeFunctionCode(const IceString &FuncName,
SymTab->createDefinedSym(FuncName, SymbolType, SymbolBinding, Section, SymTab->createDefinedSym(FuncName, SymbolType, SymbolBinding, Section,
OffsetInSection, SymbolSize); OffsetInSection, SymbolSize);
StrTab->add(FuncName); StrTab->add(FuncName);
for (const auto &InternalReloc : Asm->getInternalRelocations()) {
const IceString &RelocName = InternalReloc.first;
constexpr uint8_t RelocSymbolType = STT_NOTYPE;
constexpr uint8_t RelocSymbolBinding = STB_LOCAL;
const SizeT RelocOffsetInSection = OffsetInSection + InternalReloc.second;
SymTab->createDefinedSym(RelocName, RelocSymbolType, RelocSymbolBinding,
Section, RelocOffsetInSection, SymbolSize);
StrTab->add(RelocName);
}
// Copy the fixup information from per-function Assembler memory to the // Copy the fixup information from per-function Assembler memory to the
// object writer's memory, for writing later. // object writer's memory, for writing later.
......
...@@ -356,17 +356,24 @@ void ELFRelocationSection::writeData(const GlobalContext &Ctx, ELFStreamer &Str, ...@@ -356,17 +356,24 @@ void ELFRelocationSection::writeData(const GlobalContext &Ctx, ELFStreamer &Str,
Symbol = SymTab->getNullSymbol(); Symbol = SymTab->getNullSymbol();
} else { } else {
constexpr Assembler *Asm = nullptr; constexpr Assembler *Asm = nullptr;
Symbol = SymTab->findSymbol(Fixup.symbol(&Ctx, Asm)); IceString Name = Fixup.symbol(&Ctx, Asm);
} Symbol = SymTab->findSymbol(Name);
if (!Symbol) if (!Symbol)
llvm::report_fatal_error("Missing symbol mentioned in reloc"); llvm::report_fatal_error(Name + ": Missing symbol mentioned in reloc");
}
if (IsELF64) { if (IsELF64) {
// TODO(jpp): check that Fixup.offset() is correct even for pc-rel.
Elf64_Rela Rela; Elf64_Rela Rela;
Rela.r_offset = Fixup.position(); Rela.r_offset = Fixup.position();
Rela.setSymbolAndType(Symbol->getNumber(), Fixup.kind()); Rela.setSymbolAndType(Symbol->getNumber(), Fixup.kind());
Rela.r_addend = Fixup.offset(); Rela.r_addend = Fixup.offset();
if (Fixup.kind() == llvm::ELF::R_X86_64_PC32) {
// In ELF64, PC-relative relocations' addends need to account for the
// immediate size. For now, this is always 4 (because x86-64 sandboxed
// is the only ELF64 target currently implemented.)
constexpr int32_t RelocImmediateSize = 4;
Rela.r_addend -= RelocImmediateSize;
}
Str.writeAddrOrOffset<IsELF64>(Rela.r_offset); Str.writeAddrOrOffset<IsELF64>(Rela.r_offset);
Str.writeELFXword<IsELF64>(Rela.r_info); Str.writeELFXword<IsELF64>(Rela.r_info);
Str.writeELFXword<IsELF64>(Rela.r_addend); Str.writeELFXword<IsELF64>(Rela.r_addend);
......
...@@ -858,6 +858,10 @@ void InstBundleLock::emit(const Cfg *Func) const { ...@@ -858,6 +858,10 @@ void InstBundleLock::emit(const Cfg *Func) const {
Str << "\t" Str << "\t"
"align_to_end"; "align_to_end";
break; break;
case Opt_PadToEnd:
Str << "\t"
"align_to_end /* pad_to_end */";
break;
} }
Str << "\n"; Str << "\n";
} }
...@@ -873,6 +877,9 @@ void InstBundleLock::dump(const Cfg *Func) const { ...@@ -873,6 +877,9 @@ void InstBundleLock::dump(const Cfg *Func) const {
case Opt_AlignToEnd: case Opt_AlignToEnd:
Str << " align_to_end"; Str << " align_to_end";
break; break;
case Opt_PadToEnd:
Str << " pad_to_end";
break;
} }
} }
......
...@@ -782,7 +782,7 @@ class InstBundleLock : public InstHighLevel { ...@@ -782,7 +782,7 @@ class InstBundleLock : public InstHighLevel {
InstBundleLock &operator=(const InstBundleLock &) = delete; InstBundleLock &operator=(const InstBundleLock &) = delete;
public: public:
enum Option { Opt_None, Opt_AlignToEnd }; enum Option { Opt_None, Opt_AlignToEnd, Opt_PadToEnd };
static InstBundleLock *create(Cfg *Func, Option BundleOption) { static InstBundleLock *create(Cfg *Func, Option BundleOption) {
return new (Func->allocate<InstBundleLock>()) return new (Func->allocate<InstBundleLock>())
InstBundleLock(Func, BundleOption); InstBundleLock(Func, BundleOption);
......
...@@ -76,9 +76,9 @@ void TargetX8632Traits::X86Operand::dump(const Cfg *, Ostream &Str) const { ...@@ -76,9 +76,9 @@ void TargetX8632Traits::X86Operand::dump(const Cfg *, Ostream &Str) const {
TargetX8632Traits::X86OperandMem::X86OperandMem( TargetX8632Traits::X86OperandMem::X86OperandMem(
Cfg *Func, Type Ty, Variable *Base, Constant *Offset, Variable *Index, Cfg *Func, Type Ty, Variable *Base, Constant *Offset, Variable *Index,
uint16_t Shift, SegmentRegisters SegmentReg, bool IsPIC) uint16_t Shift, SegmentRegisters SegmentReg, bool IsRebased)
: X86Operand(kMem, Ty), Base(Base), Offset(Offset), Index(Index), : X86Operand(kMem, Ty), Base(Base), Offset(Offset), Index(Index),
Shift(Shift), SegmentReg(SegmentReg), IsPIC(IsPIC) { Shift(Shift), SegmentReg(SegmentReg), IsRebased(IsRebased) {
assert(Shift <= 3); assert(Shift <= 3);
Vars = nullptr; Vars = nullptr;
NumVars = 0; NumVars = 0;
...@@ -118,12 +118,12 @@ void validateMemOperandPIC(const TargetX8632Traits::X86OperandMem *Mem, ...@@ -118,12 +118,12 @@ void validateMemOperandPIC(const TargetX8632Traits::X86OperandMem *Mem,
const bool HasCR = const bool HasCR =
Mem->getOffset() && llvm::isa<ConstantRelocatable>(Mem->getOffset()); Mem->getOffset() && llvm::isa<ConstantRelocatable>(Mem->getOffset());
(void)HasCR; (void)HasCR;
const bool IsPIC = Mem->getIsPIC(); const bool IsRebased = Mem->getIsRebased();
(void)IsPIC; (void)IsRebased;
if (UseNonsfi) if (UseNonsfi)
assert(HasCR == IsPIC); assert(HasCR == IsRebased);
else else
assert(!IsPIC); assert(!IsRebased);
} }
} // end of anonymous namespace } // end of anonymous namespace
...@@ -260,7 +260,7 @@ void TargetX8632Traits::X86OperandMem::emitSegmentOverride( ...@@ -260,7 +260,7 @@ void TargetX8632Traits::X86OperandMem::emitSegmentOverride(
TargetX8632Traits::Address TargetX8632Traits::X86OperandMem::toAsmAddress( TargetX8632Traits::Address TargetX8632Traits::X86OperandMem::toAsmAddress(
TargetX8632Traits::Assembler *Asm, TargetX8632Traits::Assembler *Asm,
const Ice::TargetLowering *TargetLowering) const { const Ice::TargetLowering *TargetLowering, bool /*IsLeaAddr*/) const {
const auto *Target = const auto *Target =
static_cast<const ::Ice::X8632::TargetX8632 *>(TargetLowering); static_cast<const ::Ice::X8632::TargetX8632 *>(TargetLowering);
const bool UseNonsfi = Target->getGlobalContext()->getFlags().getUseNonsfi(); const bool UseNonsfi = Target->getGlobalContext()->getFlags().getUseNonsfi();
......
...@@ -67,9 +67,9 @@ TargetX8664Traits::X86OperandMem::X86OperandMem(Cfg *Func, Type Ty, ...@@ -67,9 +67,9 @@ TargetX8664Traits::X86OperandMem::X86OperandMem(Cfg *Func, Type Ty,
Variable *Base, Variable *Base,
Constant *Offset, Constant *Offset,
Variable *Index, uint16_t Shift, Variable *Index, uint16_t Shift,
bool IsPIC) bool IsRebased)
: X86Operand(kMem, Ty), Base(Base), Offset(Offset), Index(Index), : X86Operand(kMem, Ty), Base(Base), Offset(Offset), Index(Index),
Shift(Shift), IsPIC(IsPIC) { Shift(Shift), IsRebased(IsRebased) {
assert(Shift <= 3); assert(Shift <= 3);
Vars = nullptr; Vars = nullptr;
NumVars = 0; NumVars = 0;
...@@ -110,6 +110,7 @@ void TargetX8664Traits::X86OperandMem::emit(const Cfg *Func) const { ...@@ -110,6 +110,7 @@ void TargetX8664Traits::X86OperandMem::emit(const Cfg *Func) const {
static_cast<const ::Ice::X8664::TargetX8664 *>(Func->getTarget()); static_cast<const ::Ice::X8664::TargetX8664 *>(Func->getTarget());
// If the base is rematerializable, we need to replace it with the correct // If the base is rematerializable, we need to replace it with the correct
// physical register (stack or base pointer), and update the Offset. // physical register (stack or base pointer), and update the Offset.
const bool NeedSandboxing = Target->needSandboxing();
int32_t Disp = 0; int32_t Disp = 0;
if (getBase() && getBase()->isRematerializable()) { if (getBase() && getBase()->isRematerializable()) {
Disp += getRematerializableOffset(getBase(), Target); Disp += getRematerializableOffset(getBase(), Target);
...@@ -140,30 +141,40 @@ void TargetX8664Traits::X86OperandMem::emit(const Cfg *Func) const { ...@@ -140,30 +141,40 @@ void TargetX8664Traits::X86OperandMem::emit(const Cfg *Func) const {
llvm_unreachable("Invalid offset type for x86 mem operand"); llvm_unreachable("Invalid offset type for x86 mem operand");
} }
if (Base || Index) { if (Base == nullptr && Index == nullptr) {
return;
}
Str << "("; Str << "(";
if (Base) { if (Base != nullptr) {
const Variable *Base32 = Base; const Variable *B = Base;
if (Base->getType() != IceType_i32) { if (!NeedSandboxing) {
// TODO(jpp): stop abusing the operand's type to identify LEAs.
const Type MemType = getType();
if (Base->getType() != IceType_i32 && MemType != IceType_void) {
// X86-64 is ILP32, but %rsp and %rbp are accessed as 64-bit registers. // X86-64 is ILP32, but %rsp and %rbp are accessed as 64-bit registers.
// For filetype=asm, they need to be emitted as their 32-bit siblings. // For filetype=asm, they need to be emitted as their 32-bit siblings.
assert(Base->getType() == IceType_i64); assert(Base->getType() == IceType_i64);
assert(Base->getRegNum() == RegX8664::Encoded_Reg_rsp || assert(Base->getRegNum() == RegX8664::Encoded_Reg_rsp ||
Base->getRegNum() == RegX8664::Encoded_Reg_rbp); Base->getRegNum() == RegX8664::Encoded_Reg_rbp ||
Base32 = Base->asType(IceType_i32, X8664::Traits::getGprForType( getType() == IceType_void);
B = B->asType(IceType_i32, X8664::Traits::getGprForType(
IceType_i32, Base->getRegNum())); IceType_i32, Base->getRegNum()));
} }
Base32->emit(Func);
} }
if (Index) {
assert(Index->getType() == IceType_i32); B->emit(Func);
}
if (Index != nullptr) {
Variable *I = Index;
Str << ","; Str << ",";
Index->emit(Func); I->emit(Func);
if (Shift) if (Shift)
Str << "," << (1u << Shift); Str << "," << (1u << Shift);
} }
Str << ")"; Str << ")";
}
} }
void TargetX8664Traits::X86OperandMem::dump(const Cfg *Func, void TargetX8664Traits::X86OperandMem::dump(const Cfg *Func,
...@@ -228,15 +239,18 @@ void TargetX8664Traits::X86OperandMem::dump(const Cfg *Func, ...@@ -228,15 +239,18 @@ void TargetX8664Traits::X86OperandMem::dump(const Cfg *Func,
TargetX8664Traits::Address TargetX8664Traits::X86OperandMem::toAsmAddress( TargetX8664Traits::Address TargetX8664Traits::X86OperandMem::toAsmAddress(
TargetX8664Traits::Assembler *Asm, TargetX8664Traits::Assembler *Asm,
const Ice::TargetLowering *TargetLowering) const { const Ice::TargetLowering *TargetLowering, bool IsLeaAddr) const {
(void)IsLeaAddr;
const auto *Target = const auto *Target =
static_cast<const ::Ice::X8664::TargetX8664 *>(TargetLowering); static_cast<const ::Ice::X8664::TargetX8664 *>(TargetLowering);
int32_t Disp = 0; int32_t Disp = 0;
if (getBase() && getBase()->isRematerializable()) { if (getBase() && getBase()->isRematerializable()) {
Disp += getRematerializableOffset(getBase(), Target); Disp += getRematerializableOffset(getBase(), Target);
} }
if (getIndex()) if (getIndex() != nullptr) {
assert(!getIndex()->isRematerializable()); assert(!getIndex()->isRematerializable());
}
AssemblerFixup *Fixup = nullptr; AssemblerFixup *Fixup = nullptr;
// Determine the offset (is it relocatable?) // Determine the offset (is it relocatable?)
if (getOffset() != nullptr) { if (getOffset() != nullptr) {
...@@ -253,20 +267,28 @@ TargetX8664Traits::Address TargetX8664Traits::X86OperandMem::toAsmAddress( ...@@ -253,20 +267,28 @@ TargetX8664Traits::Address TargetX8664Traits::X86OperandMem::toAsmAddress(
// Now convert to the various possible forms. // Now convert to the various possible forms.
if (getBase() && getIndex()) { if (getBase() && getIndex()) {
const bool NeedSandboxing = Target->needSandboxing();
(void)NeedSandboxing;
assert(!NeedSandboxing || IsLeaAddr ||
(getBase()->getRegNum() == Traits::RegisterSet::Reg_r15));
return X8664::Traits::Address(getEncodedGPR(getBase()->getRegNum()), return X8664::Traits::Address(getEncodedGPR(getBase()->getRegNum()),
getEncodedGPR(getIndex()->getRegNum()), getEncodedGPR(getIndex()->getRegNum()),
X8664::Traits::ScaleFactor(getShift()), Disp, X8664::Traits::ScaleFactor(getShift()), Disp,
Fixup); Fixup);
} else if (getBase()) { }
if (getBase()) {
return X8664::Traits::Address(getEncodedGPR(getBase()->getRegNum()), Disp, return X8664::Traits::Address(getEncodedGPR(getBase()->getRegNum()), Disp,
Fixup); Fixup);
} else if (getIndex()) { }
if (getIndex()) {
return X8664::Traits::Address(getEncodedGPR(getIndex()->getRegNum()), return X8664::Traits::Address(getEncodedGPR(getIndex()->getRegNum()),
X8664::Traits::ScaleFactor(getShift()), Disp, X8664::Traits::ScaleFactor(getShift()), Disp,
Fixup); Fixup);
} else {
return X8664::Traits::Address(Disp, Fixup);
} }
return X8664::Traits::Address(Disp, Fixup);
} }
TargetX8664Traits::Address TargetX8664Traits::Address
......
...@@ -337,11 +337,13 @@ template <typename TraitsType> struct InstImpl { ...@@ -337,11 +337,13 @@ template <typename TraitsType> struct InstImpl {
void emit(const Cfg *Func) const override; void emit(const Cfg *Func) const override;
void emitIAS(const Cfg *Func) const override; void emitIAS(const Cfg *Func) const override;
void dump(const Cfg *Func) const override; void dump(const Cfg *Func) const override;
void setIsReturnLocation(bool Value) { IsReturnLocation = Value; }
private: private:
InstX86Label(Cfg *Func, TargetLowering *Target); InstX86Label(Cfg *Func, TargetLowering *Target);
SizeT Number; // used for unique label generation. SizeT Number; // used for unique label generation.
bool IsReturnLocation = false;
}; };
/// Conditional and unconditional branch instruction. /// Conditional and unconditional branch instruction.
...@@ -528,8 +530,8 @@ template <typename TraitsType> struct InstImpl { ...@@ -528,8 +530,8 @@ template <typename TraitsType> struct InstImpl {
/// Emit a two-operand (GPR) instruction, where the dest operand is a Variable /// Emit a two-operand (GPR) instruction, where the dest operand is a Variable
/// that's guaranteed to be a register. /// that's guaranteed to be a register.
template <bool VarCanBeByte = true, bool SrcCanBeByte = true> template <bool VarCanBeByte = true, bool SrcCanBeByte = true>
static void emitIASRegOpTyGPR(const Cfg *Func, Type Ty, const Variable *Dst, static void emitIASRegOpTyGPR(const Cfg *Func, bool IsLea, Type Ty,
const Operand *Src, const Variable *Dst, const Operand *Src,
const GPREmitterRegOp &Emitter); const GPREmitterRegOp &Emitter);
/// Instructions of the form x := op(x). /// Instructions of the form x := op(x).
...@@ -613,7 +615,8 @@ template <typename TraitsType> struct InstImpl { ...@@ -613,7 +615,8 @@ template <typename TraitsType> struct InstImpl {
const Variable *Var = this->getDest(); const Variable *Var = this->getDest();
Type Ty = Var->getType(); Type Ty = Var->getType();
const Operand *Src = this->getSrc(0); const Operand *Src = this->getSrc(0);
emitIASRegOpTyGPR(Func, Ty, Var, Src, Emitter); constexpr bool IsLea = K == InstX86Base::Lea;
emitIASRegOpTyGPR(Func, IsLea, Ty, Var, Src, Emitter);
} }
void dump(const Cfg *Func) const override { void dump(const Cfg *Func) const override {
if (!BuildDefs::dump()) if (!BuildDefs::dump())
...@@ -743,7 +746,10 @@ template <typename TraitsType> struct InstImpl { ...@@ -743,7 +746,10 @@ template <typename TraitsType> struct InstImpl {
void emitIAS(const Cfg *Func) const override { void emitIAS(const Cfg *Func) const override {
Type Ty = this->getDest()->getType(); Type Ty = this->getDest()->getType();
assert(this->getSrcSize() == 2); assert(this->getSrcSize() == 2);
emitIASRegOpTyGPR(Func, Ty, this->getDest(), this->getSrc(1), Emitter); constexpr bool ThisIsLEA = K == InstX86Base::Lea;
static_assert(!ThisIsLEA, "Lea should be a unaryop.");
emitIASRegOpTyGPR(Func, !ThisIsLEA, Ty, this->getDest(), this->getSrc(1),
Emitter);
} }
void dump(const Cfg *Func) const override { void dump(const Cfg *Func) const override {
if (!BuildDefs::dump()) if (!BuildDefs::dump())
...@@ -1177,9 +1183,15 @@ template <typename TraitsType> struct InstImpl { ...@@ -1177,9 +1183,15 @@ template <typename TraitsType> struct InstImpl {
void emitIAS(const Cfg *Func) const override; void emitIAS(const Cfg *Func) const override;
void setMustKeep() { MustKeep = true; }
private: private:
bool MustKeep = false;
InstX86Movzx(Cfg *Func, Variable *Dest, Operand *Src) InstX86Movzx(Cfg *Func, Variable *Dest, Operand *Src)
: InstX86BaseUnaryopGPR<InstX86Base::Movzx>(Func, Dest, Src) {} : InstX86BaseUnaryopGPR<InstX86Base::Movzx>(Func, Dest, Src) {}
bool mayBeElided(const Variable *Dest, const Operand *Src) const;
}; };
class InstX86Movd : public InstX86BaseUnaryopXmm<InstX86Base::Movd> { class InstX86Movd : public InstX86BaseUnaryopXmm<InstX86Base::Movd> {
...@@ -2638,7 +2650,10 @@ template <typename TraitsType> struct InstImpl { ...@@ -2638,7 +2650,10 @@ template <typename TraitsType> struct InstImpl {
InstX86Push &operator=(const InstX86Push &) = delete; InstX86Push &operator=(const InstX86Push &) = delete;
public: public:
static InstX86Push *create(Cfg *Func, Variable *Source) { static InstX86Push *create(Cfg *Func, InstX86Label *Label) {
return new (Func->allocate<InstX86Push>()) InstX86Push(Func, Label);
}
static InstX86Push *create(Cfg *Func, Operand *Source) {
return new (Func->allocate<InstX86Push>()) InstX86Push(Func, Source); return new (Func->allocate<InstX86Push>()) InstX86Push(Func, Source);
} }
void emit(const Cfg *Func) const override; void emit(const Cfg *Func) const override;
...@@ -2649,7 +2664,10 @@ template <typename TraitsType> struct InstImpl { ...@@ -2649,7 +2664,10 @@ template <typename TraitsType> struct InstImpl {
} }
private: private:
InstX86Push(Cfg *Func, Variable *Source); InstX86Label *Label = nullptr;
InstX86Push(Cfg *Func, Operand *Source);
InstX86Push(Cfg *Func, InstX86Label *Label);
}; };
/// Ret instruction. Currently only supports the "ret" version that does not /// Ret instruction. Currently only supports the "ret" version that does not
......
...@@ -324,12 +324,16 @@ InstImpl<TraitsType>::InstX86Pop::InstX86Pop(Cfg *Func, Variable *Dest) ...@@ -324,12 +324,16 @@ InstImpl<TraitsType>::InstX86Pop::InstX86Pop(Cfg *Func, Variable *Dest)
} }
template <typename TraitsType>
InstImpl<TraitsType>::InstX86Push::InstX86Push(Cfg *Func, Operand *Source)
    : InstX86Base(Func, InstX86Base::Push, 1, nullptr) {
  // A value push: the pushed operand is recorded as the single source.
  this->addSource(Source);
}
template <typename TraitsType>
InstImpl<TraitsType>::InstX86Push::InstX86Push(Cfg *Func, InstX86Label *L)
    : InstX86Base(Func, InstX86Base::Push, 0, nullptr), Label(L) {}
template <typename TraitsType>
InstImpl<TraitsType>::InstX86Ret::InstX86Ret(Cfg *Func, Variable *Source) InstImpl<TraitsType>::InstX86Ret::InstX86Ret(Cfg *Func, Variable *Source)
: InstX86Base(Func, InstX86Base::Ret, Source ? 1 : 0, nullptr) { : InstX86Base(Func, InstX86Base::Ret, Source ? 1 : 0, nullptr) {
if (Source) if (Source)
...@@ -455,6 +459,9 @@ void InstImpl<TraitsType>::InstX86Label::emit(const Cfg *Func) const { ...@@ -455,6 +459,9 @@ void InstImpl<TraitsType>::InstX86Label::emit(const Cfg *Func) const {
template <typename TraitsType> template <typename TraitsType>
void InstImpl<TraitsType>::InstX86Label::emitIAS(const Cfg *Func) const { void InstImpl<TraitsType>::InstX86Label::emitIAS(const Cfg *Func) const {
Assembler *Asm = Func->getAssembler<Assembler>(); Assembler *Asm = Func->getAssembler<Assembler>();
if (IsReturnLocation) {
Asm->addRelocationAtCurrentPosition(getName(Func));
}
Asm->bindLocalLabel(Number); Asm->bindLocalLabel(Number);
} }
...@@ -552,8 +559,18 @@ void InstImpl<TraitsType>::InstX86Jmp::emit(const Cfg *Func) const { ...@@ -552,8 +559,18 @@ void InstImpl<TraitsType>::InstX86Jmp::emit(const Cfg *Func) const {
return; return;
Ostream &Str = Func->getContext()->getStrEmit(); Ostream &Str = Func->getContext()->getStrEmit();
assert(this->getSrcSize() == 1); assert(this->getSrcSize() == 1);
const Operand *Src = this->getSrc(0);
if (Traits::Is64Bit) {
if (const auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src)) {
Str << "\t" Str << "\t"
"jmp\t*"; "jmp"
"\t" << CR->getName();
return;
}
}
Str << "\t"
"jmp"
"\t*";
getJmpTarget()->emit(Func); getJmpTarget()->emit(Func);
} }
...@@ -707,8 +724,8 @@ void InstImpl<TraitsType>::emitIASOpTyGPR(const Cfg *Func, Type Ty, ...@@ -707,8 +724,8 @@ void InstImpl<TraitsType>::emitIASOpTyGPR(const Cfg *Func, Type Ty,
template <typename TraitsType> template <typename TraitsType>
template <bool VarCanBeByte, bool SrcCanBeByte> template <bool VarCanBeByte, bool SrcCanBeByte>
void InstImpl<TraitsType>::emitIASRegOpTyGPR(const Cfg *Func, Type Ty, void InstImpl<TraitsType>::emitIASRegOpTyGPR(const Cfg *Func, bool IsLea,
const Variable *Var, Type Ty, const Variable *Var,
const Operand *Src, const Operand *Src,
const GPREmitterRegOp &Emitter) { const GPREmitterRegOp &Emitter) {
auto *Target = InstX86Base::getTarget(Func); auto *Target = InstX86Base::getTarget(Func);
...@@ -729,7 +746,8 @@ void InstImpl<TraitsType>::emitIASRegOpTyGPR(const Cfg *Func, Type Ty, ...@@ -729,7 +746,8 @@ void InstImpl<TraitsType>::emitIASRegOpTyGPR(const Cfg *Func, Type Ty,
} }
} else if (const auto *Mem = llvm::dyn_cast<X86OperandMem>(Src)) { } else if (const auto *Mem = llvm::dyn_cast<X86OperandMem>(Src)) {
Mem->emitSegmentOverride(Asm); Mem->emitSegmentOverride(Asm);
(Asm->*(Emitter.GPRAddr))(Ty, VarReg, Mem->toAsmAddress(Asm, Target)); (Asm->*(Emitter.GPRAddr))(Ty, VarReg,
Mem->toAsmAddress(Asm, Target, IsLea));
} else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) { } else if (const auto *Imm = llvm::dyn_cast<ConstantInteger32>(Src)) {
(Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue())); (Asm->*(Emitter.GPRImm))(Ty, VarReg, AssemblerImmediate(Imm->getValue()));
} else if (const auto *Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) { } else if (const auto *Reloc = llvm::dyn_cast<ConstantRelocatable>(Src)) {
...@@ -1136,7 +1154,8 @@ void InstImpl<TraitsType>::InstX86Imul::emitIAS(const Cfg *Func) const { ...@@ -1136,7 +1154,8 @@ void InstImpl<TraitsType>::InstX86Imul::emitIAS(const Cfg *Func) const {
assert(Var == this->getSrc(0)); assert(Var == this->getSrc(0));
static const GPREmitterRegOp Emitter = {&Assembler::imul, &Assembler::imul, static const GPREmitterRegOp Emitter = {&Assembler::imul, &Assembler::imul,
&Assembler::imul}; &Assembler::imul};
emitIASRegOpTyGPR(Func, Ty, Var, Src, Emitter); constexpr bool NotLea = false;
emitIASRegOpTyGPR(Func, NotLea, Ty, Var, Src, Emitter);
} }
} }
...@@ -1695,7 +1714,8 @@ void InstImpl<TraitsType>::InstX86Icmp::emitIAS(const Cfg *Func) const { ...@@ -1695,7 +1714,8 @@ void InstImpl<TraitsType>::InstX86Icmp::emitIAS(const Cfg *Func) const {
&Assembler::cmp}; &Assembler::cmp};
if (const auto *SrcVar0 = llvm::dyn_cast<Variable>(Src0)) { if (const auto *SrcVar0 = llvm::dyn_cast<Variable>(Src0)) {
if (SrcVar0->hasReg()) { if (SrcVar0->hasReg()) {
emitIASRegOpTyGPR(Func, Ty, SrcVar0, Src1, RegEmitter); constexpr bool NotLea = false;
emitIASRegOpTyGPR(Func, NotLea, Ty, SrcVar0, Src1, RegEmitter);
return; return;
} }
} }
...@@ -1797,7 +1817,8 @@ void InstImpl<TraitsType>::InstX86Test::emitIAS(const Cfg *Func) const { ...@@ -1797,7 +1817,8 @@ void InstImpl<TraitsType>::InstX86Test::emitIAS(const Cfg *Func) const {
&Assembler::test}; &Assembler::test};
if (const auto *SrcVar0 = llvm::dyn_cast<Variable>(Src0)) { if (const auto *SrcVar0 = llvm::dyn_cast<Variable>(Src0)) {
if (SrcVar0->hasReg()) { if (SrcVar0->hasReg()) {
emitIASRegOpTyGPR(Func, Ty, SrcVar0, Src1, RegEmitter); constexpr bool NotLea = false;
emitIASRegOpTyGPR(Func, NotLea, Ty, SrcVar0, Src1, RegEmitter);
return; return;
} }
} }
...@@ -1980,7 +2001,7 @@ void InstImpl<TraitsType>::InstX86Lea::emit(const Cfg *Func) const { ...@@ -1980,7 +2001,7 @@ void InstImpl<TraitsType>::InstX86Lea::emit(const Cfg *Func) const {
assert(this->getSrcSize() == 1); assert(this->getSrcSize() == 1);
assert(this->getDest()->hasReg()); assert(this->getDest()->hasReg());
Str << "\t" Str << "\t"
"leal\t"; "lea" << this->getWidthString(this->getDest()->getType()) << "\t";
Operand *Src0 = this->getSrc(0); Operand *Src0 = this->getSrc(0);
if (const auto *Src0Var = llvm::dyn_cast<Variable>(Src0)) { if (const auto *Src0Var = llvm::dyn_cast<Variable>(Src0)) {
Type Ty = Src0Var->getType(); Type Ty = Src0Var->getType();
...@@ -2080,7 +2101,8 @@ void InstImpl<TraitsType>::InstX86Mov::emitIAS(const Cfg *Func) const { ...@@ -2080,7 +2101,8 @@ void InstImpl<TraitsType>::InstX86Mov::emitIAS(const Cfg *Func) const {
if (isScalarIntegerType(SrcTy)) { if (isScalarIntegerType(SrcTy)) {
SrcTy = DestTy; SrcTy = DestTy;
} }
emitIASRegOpTyGPR(Func, DestTy, Dest, Src, GPRRegEmitter); constexpr bool NotLea = false;
emitIASRegOpTyGPR(Func, NotLea, DestTy, Dest, Src, GPRRegEmitter);
return; return;
} }
} else { } else {
...@@ -2257,7 +2279,32 @@ void InstImpl<TraitsType>::InstX86Movsx::emitIAS(const Cfg *Func) const { ...@@ -2257,7 +2279,32 @@ void InstImpl<TraitsType>::InstX86Movsx::emitIAS(const Cfg *Func) const {
Type SrcTy = Src->getType(); Type SrcTy = Src->getType();
assert(typeWidthInBytes(Dest->getType()) > 1); assert(typeWidthInBytes(Dest->getType()) > 1);
assert(typeWidthInBytes(Dest->getType()) > typeWidthInBytes(SrcTy)); assert(typeWidthInBytes(Dest->getType()) > typeWidthInBytes(SrcTy));
emitIASRegOpTyGPR<false, true>(Func, SrcTy, Dest, Src, this->Emitter); constexpr bool NotLea = false;
emitIASRegOpTyGPR<false, true>(Func, NotLea, SrcTy, Dest, Src, this->Emitter);
}
template <typename TraitsType>
bool InstImpl<TraitsType>::InstX86Movzx::mayBeElided(
    const Variable *Dest, const Operand *SrcOpnd) const {
  assert(Traits::Is64Bit);
  // Only a register-to-register move where source and destination occupy the
  // same physical GPR can be elided; a must-keep movzx (sometimes required by
  // x86-64 sandboxing for the 32- to 64-bit case) never is.
  const auto *Src = llvm::dyn_cast<Variable>(SrcOpnd);
  if (Src == nullptr || !Src->hasReg() || !Dest->hasReg())
    return false;
  const auto SrcGPR = Traits::getEncodedGPR(Src->getRegNum());
  const auto DestGPR = Traits::getEncodedGPR(Dest->getRegNum());
  return SrcGPR == DestGPR && !MustKeep;
}
template <typename TraitsType> template <typename TraitsType>
...@@ -2272,6 +2319,9 @@ void InstImpl<TraitsType>::InstX86Movzx::emit(const Cfg *Func) const { ...@@ -2272,6 +2319,9 @@ void InstImpl<TraitsType>::InstX86Movzx::emit(const Cfg *Func) const {
const Variable *Dest = this->Dest; const Variable *Dest = this->Dest;
if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64) { if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64) {
Ostream &Str = Func->getContext()->getStrEmit(); Ostream &Str = Func->getContext()->getStrEmit();
if (mayBeElided(Dest, Src)) {
Str << "\t/* elided movzx */";
} else {
Str << "\t" Str << "\t"
"mov" "mov"
"\t"; "\t";
...@@ -2281,6 +2331,7 @@ void InstImpl<TraitsType>::InstX86Movzx::emit(const Cfg *Func) const { ...@@ -2281,6 +2331,7 @@ void InstImpl<TraitsType>::InstX86Movzx::emit(const Cfg *Func) const {
Traits::getGprForType(IceType_i32, Dest->getRegNum())) Traits::getGprForType(IceType_i32, Dest->getRegNum()))
->emit(Func); ->emit(Func);
Str << " /* movzx */"; Str << " /* movzx */";
}
return; return;
} }
} }
...@@ -2295,7 +2346,14 @@ void InstImpl<TraitsType>::InstX86Movzx::emitIAS(const Cfg *Func) const { ...@@ -2295,7 +2346,14 @@ void InstImpl<TraitsType>::InstX86Movzx::emitIAS(const Cfg *Func) const {
Type SrcTy = Src->getType(); Type SrcTy = Src->getType();
assert(typeWidthInBytes(Dest->getType()) > 1); assert(typeWidthInBytes(Dest->getType()) > 1);
assert(typeWidthInBytes(Dest->getType()) > typeWidthInBytes(SrcTy)); assert(typeWidthInBytes(Dest->getType()) > typeWidthInBytes(SrcTy));
emitIASRegOpTyGPR<false, true>(Func, SrcTy, Dest, Src, this->Emitter); if (Traits::Is64Bit) {
if (Src->getType() == IceType_i32 && Dest->getType() == IceType_i64 &&
mayBeElided(Dest, Src)) {
return;
}
}
constexpr bool NotLea = false;
emitIASRegOpTyGPR<false, true>(Func, NotLea, SrcTy, Dest, Src, this->Emitter);
} }
template <typename TraitsType> template <typename TraitsType>
...@@ -2617,23 +2675,30 @@ void InstImpl<TraitsType>::InstX86Push::emit(const Cfg *Func) const { ...@@ -2617,23 +2675,30 @@ void InstImpl<TraitsType>::InstX86Push::emit(const Cfg *Func) const {
if (!BuildDefs::dump()) if (!BuildDefs::dump())
return; return;
Ostream &Str = Func->getContext()->getStrEmit(); Ostream &Str = Func->getContext()->getStrEmit();
assert(this->getSrcSize() == 1);
// Push is currently only used for saving GPRs.
const auto *Var = llvm::cast<Variable>(this->getSrc(0));
assert(Var->hasReg());
Str << "\t" Str << "\t"
"push\t"; "push"
Var->emit(Func); "\t";
assert(this->getSrcSize() == 1);
const Operand *Src = this->getSrc(0);
Src->emit(Func);
} }
template <typename TraitsType> template <typename TraitsType>
void InstImpl<TraitsType>::InstX86Push::emitIAS(const Cfg *Func) const { void InstImpl<TraitsType>::InstX86Push::emitIAS(const Cfg *Func) const {
assert(this->getSrcSize() == 1);
// Push is currently only used for saving GPRs.
const auto *Var = llvm::cast<Variable>(this->getSrc(0));
assert(Var->hasReg());
Assembler *Asm = Func->getAssembler<Assembler>(); Assembler *Asm = Func->getAssembler<Assembler>();
assert(this->getSrcSize() == 1);
const Operand *Src = this->getSrc(0);
if (const auto *Var = llvm::dyn_cast<Variable>(Src)) {
Asm->pushl(Traits::getEncodedGPR(Var->getRegNum())); Asm->pushl(Traits::getEncodedGPR(Var->getRegNum()));
} else if (const auto *Const32 = llvm::dyn_cast<ConstantInteger32>(Src)) {
Asm->pushl(AssemblerImmediate(Const32->getValue()));
} else if (auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src)) {
Asm->pushl(CR);
} else {
llvm_unreachable("Unexpected operand type");
}
} }
template <typename TraitsType> template <typename TraitsType>
......
...@@ -27,8 +27,8 @@ public: ...@@ -27,8 +27,8 @@ public:
/// to binary encode register operands in instructions. /// to binary encode register operands in instructions.
enum AllRegisters { enum AllRegisters {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \ #define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, is16To8, \ sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
isTrunc8Rcvr, isAhRcvr, aliases) \ is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
val, val,
REGX8664_TABLE REGX8664_TABLE
#undef X #undef X
...@@ -39,8 +39,8 @@ public: ...@@ -39,8 +39,8 @@ public:
/// binary encode register operands in instructions. /// binary encode register operands in instructions.
enum GPRRegister { enum GPRRegister {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \ #define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, is16To8, \ sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
isTrunc8Rcvr, isAhRcvr, aliases) \ is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
Encoded_##val = encode, Encoded_##val = encode,
REGX8664_GPR_TABLE REGX8664_GPR_TABLE
#undef X #undef X
...@@ -51,8 +51,8 @@ public: ...@@ -51,8 +51,8 @@ public:
/// binary encode register operands in instructions. /// binary encode register operands in instructions.
enum XmmRegister { enum XmmRegister {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \ #define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, is16To8, \ sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
isTrunc8Rcvr, isAhRcvr, aliases) \ is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
Encoded_##val = encode, Encoded_##val = encode,
REGX8664_XMM_TABLE REGX8664_XMM_TABLE
#undef X #undef X
...@@ -63,8 +63,8 @@ public: ...@@ -63,8 +63,8 @@ public:
/// binary encode register operands in instructions. /// binary encode register operands in instructions.
enum ByteRegister { enum ByteRegister {
#define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \ #define X(val, encode, name, base, scratch, preserved, stackptr, frameptr, \
isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, is16To8, \ sboxres, isGPR, is64, is32, is16, is8, isXmm, is64To8, is32To8, \
isTrunc8Rcvr, isAhRcvr, aliases) \ is16To8, isTrunc8Rcvr, isAhRcvr, aliases) \
Encoded_8_##val = encode, Encoded_8_##val = encode,
REGX8664_BYTEREG_TABLE REGX8664_BYTEREG_TABLE
#undef X #undef X
......
...@@ -130,6 +130,33 @@ FixupKind TargetX86Base<X8632::Traits>::AbsFixup = ...@@ -130,6 +130,33 @@ FixupKind TargetX86Base<X8632::Traits>::AbsFixup =
// \/_____/\/_____/\/_/ \/_/\/_____/\/_/ /_/\/_/\/_/ \/_/\/_____/ // \/_____/\/_____/\/_/ \/_/\/_____/\/_/ /_/\/_/\/_/ \/_/\/_____/
// //
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// Emits "add esp, Adjustment", adding Adjustment to the stack pointer
// (e.g. when releasing previously reserved stack space).
void TargetX8632::_add_sp(Operand *Adjustment) {
  Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
  _add(esp, Adjustment);
}
// Emits "mov esp, NewValue", replacing the stack pointer wholesale.
// NOTE(review): _redefined presumably marks the mov as a legitimate
// redefinition of esp for later passes — confirm against its definition.
void TargetX8632::_mov_sp(Operand *NewValue) {
  Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
  _redefined(_mov(esp, NewValue));
}
// Emits "sub esp, Adjustment", subtracting Adjustment from the stack
// pointer (e.g. when reserving stack space).
void TargetX8632::_sub_sp(Operand *Adjustment) {
  Variable *esp = getPhysicalRegister(Traits::RegisterSet::Reg_esp);
  _sub(esp, Adjustment);
}
// Lowers an indirect jump through JumpTarget. Under NaCl sandboxing the
// target is first masked with ~(BundleSize - 1), clearing its low bits so
// the jump can only land on a bundle boundary, and the and+jmp pair is
// wrapped in bundle_lock/bundle_unlock so the sequence itself cannot be
// split across bundles.
void TargetX8632::lowerIndirectJump(Variable *JumpTarget) {
  if (NeedSandboxing) {
    _bundle_lock();
    // BundleSize = 2 ^ getBundleAlignLog2Bytes().
    const SizeT BundleSize =
        1 << Func->getAssembler<>()->getBundleAlignLog2Bytes();
    _and(JumpTarget, Ctx->getConstantInt32(~(BundleSize - 1)));
  }
  _jmp(JumpTarget);
  if (NeedSandboxing)
    _bundle_unlock();
}
void TargetX8632::lowerCall(const InstCall *Instr) { void TargetX8632::lowerCall(const InstCall *Instr) {
// x86-32 calling convention: // x86-32 calling convention:
// //
...@@ -253,7 +280,6 @@ void TargetX8632::lowerCall(const InstCall *Instr) { ...@@ -253,7 +280,6 @@ void TargetX8632::lowerCall(const InstCall *Instr) {
} }
Operand *CallTarget = Operand *CallTarget =
legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm | Legal_AddrAbs); legalize(Instr->getCallTarget(), Legal_Reg | Legal_Imm | Legal_AddrAbs);
const bool NeedSandboxing = Ctx->getFlags().getUseSandboxing();
if (NeedSandboxing) { if (NeedSandboxing) {
if (llvm::isa<Constant>(CallTarget)) { if (llvm::isa<Constant>(CallTarget)) {
_bundle_lock(InstBundleLock::Opt_AlignToEnd); _bundle_lock(InstBundleLock::Opt_AlignToEnd);
...@@ -723,7 +749,7 @@ void TargetX8632::addEpilog(CfgNode *Node) { ...@@ -723,7 +749,7 @@ void TargetX8632::addEpilog(CfgNode *Node) {
} }
} }
if (!Ctx->getFlags().getUseSandboxing()) if (!NeedSandboxing)
return; return;
// Change the original ret instruction into a sandboxed return sequence. // Change the original ret instruction into a sandboxed return sequence.
// t:ecx = pop // t:ecx = pop
......
...@@ -49,6 +49,15 @@ public: ...@@ -49,6 +49,15 @@ public:
} }
protected: protected:
void _add_sp(Operand *Adjustment);
void _mov_sp(Operand *NewValue);
// x86-32 never rewrites memory references for sandboxing, so reaching
// this stub is a bug: fail loudly instead of returning anything.
Traits::X86OperandMem *_sandbox_mem_reference(X86OperandMem *) {
  llvm::report_fatal_error("sandbox mem reference for x86-32.");
}
void _sub_sp(Operand *Adjustment);
void initSandbox() {}
void lowerIndirectJump(Variable *JumpTarget);
void lowerCall(const InstCall *Instr) override; void lowerCall(const InstCall *Instr) override;
void lowerArguments() override; void lowerArguments() override;
void lowerRet(const InstRet *Inst) override; void lowerRet(const InstRet *Inst) override;
......
...@@ -445,6 +445,7 @@ private: ...@@ -445,6 +445,7 @@ private:
public: public:
static void initRegisterSet( static void initRegisterSet(
const ::Ice::ClFlags & /*Flags*/,
std::array<llvm::SmallBitVector, RCX86_NUM> *TypeToRegisterSet, std::array<llvm::SmallBitVector, RCX86_NUM> *TypeToRegisterSet,
std::array<llvm::SmallBitVector, RegisterSet::Reg_NUM> *RegisterAliases, std::array<llvm::SmallBitVector, RegisterSet::Reg_NUM> *RegisterAliases,
llvm::SmallBitVector *ScratchRegs) { llvm::SmallBitVector *ScratchRegs) {
...@@ -536,7 +537,8 @@ public: ...@@ -536,7 +537,8 @@ public:
} }
static llvm::SmallBitVector static llvm::SmallBitVector
getRegisterSet(TargetLowering::RegSetMask Include, getRegisterSet(const ::Ice::ClFlags & /*Flags*/,
TargetLowering::RegSetMask Include,
TargetLowering::RegSetMask Exclude) { TargetLowering::RegSetMask Exclude) {
llvm::SmallBitVector Registers(RegisterSet::Reg_NUM); llvm::SmallBitVector Registers(RegisterSet::Reg_NUM);
...@@ -813,9 +815,16 @@ public: ...@@ -813,9 +815,16 @@ public:
Constant *Offset, Variable *Index = nullptr, Constant *Offset, Variable *Index = nullptr,
uint16_t Shift = 0, uint16_t Shift = 0,
SegmentRegisters SegmentReg = DefaultSegment, SegmentRegisters SegmentReg = DefaultSegment,
bool IsPIC = false) { bool IsRebased = false) {
return new (Func->allocate<X86OperandMem>()) X86OperandMem( return new (Func->allocate<X86OperandMem>()) X86OperandMem(
Func, Ty, Base, Offset, Index, Shift, SegmentReg, IsPIC); Func, Ty, Base, Offset, Index, Shift, SegmentReg, IsRebased);
}
// Convenience overload for plain base+offset addressing: no index register,
// no scale, and the default segment.
static X86OperandMem *create(Cfg *Func, Type Ty, Variable *Base,
                             Constant *Offset, bool IsRebased) {
  return new (Func->allocate<X86OperandMem>())
      X86OperandMem(Func, Ty, Base, Offset, /*Index=*/nullptr, /*Shift=*/0,
                    DefaultSegment, IsRebased);
}
Variable *getBase() const { return Base; } Variable *getBase() const { return Base; }
Constant *getOffset() const { return Offset; } Constant *getOffset() const { return Offset; }
...@@ -823,10 +832,9 @@ public: ...@@ -823,10 +832,9 @@ public:
uint16_t getShift() const { return Shift; } uint16_t getShift() const { return Shift; }
SegmentRegisters getSegmentRegister() const { return SegmentReg; } SegmentRegisters getSegmentRegister() const { return SegmentReg; }
void emitSegmentOverride(Assembler *Asm) const; void emitSegmentOverride(Assembler *Asm) const;
void setIsPIC() { IsPIC = true; } bool getIsRebased() const { return IsRebased; }
bool getIsPIC() const { return IsPIC; } Address toAsmAddress(Assembler *Asm, const Ice::TargetLowering *Target,
Address toAsmAddress(Assembler *Asm, bool LeaAddr = false) const;
const Ice::TargetLowering *Target) const;
void emit(const Cfg *Func) const override; void emit(const Cfg *Func) const override;
using X86Operand::dump; using X86Operand::dump;
...@@ -843,14 +851,14 @@ public: ...@@ -843,14 +851,14 @@ public:
private: private:
X86OperandMem(Cfg *Func, Type Ty, Variable *Base, Constant *Offset, X86OperandMem(Cfg *Func, Type Ty, Variable *Base, Constant *Offset,
Variable *Index, uint16_t Shift, SegmentRegisters SegmentReg, Variable *Index, uint16_t Shift, SegmentRegisters SegmentReg,
bool IsPIC); bool IsRebased);
Variable *Base; Variable *const Base;
Constant *Offset; Constant *const Offset;
Variable *Index; Variable *const Index;
uint16_t Shift; const uint16_t Shift;
SegmentRegisters SegmentReg : 16; const SegmentRegisters SegmentReg : 16;
bool IsPIC; const bool IsRebased;
/// A flag to show if this memory operand is a randomized one. Randomized /// A flag to show if this memory operand is a randomized one. Randomized
/// memory operands are generated in /// memory operands are generated in
/// TargetX86Base::randomizeOrPoolImmediate() /// TargetX86Base::randomizeOrPoolImmediate()
......
...@@ -45,10 +45,21 @@ public: ...@@ -45,10 +45,21 @@ public:
} }
std::unique_ptr<::Ice::Assembler> createAssembler() const override { std::unique_ptr<::Ice::Assembler> createAssembler() const override {
return makeUnique<X8664::AssemblerX8664>(); const bool EmitAddrSizeOverridePrefix = !NeedSandboxing;
return makeUnique<X8664::AssemblerX8664>(EmitAddrSizeOverridePrefix);
} }
bool needSandboxing() const { return NeedSandboxing; }
protected: protected:
void _add_sp(Operand *Adjustment);
void _mov_sp(Operand *NewValue);
void _push_rbp();
Traits::X86OperandMem *_sandbox_mem_reference(X86OperandMem *Mem);
void _sub_sp(Operand *Adjustment);
void initSandbox();
void lowerIndirectJump(Variable *JumpTarget);
void lowerCall(const InstCall *Instr) override; void lowerCall(const InstCall *Instr) override;
void lowerArguments() override; void lowerArguments() override;
void lowerRet(const InstRet *Inst) override; void lowerRet(const InstRet *Inst) override;
......
...@@ -15,14 +15,18 @@ ...@@ -15,14 +15,18 @@
#define SUBZERO_SRC_ICETYPES_DEF #define SUBZERO_SRC_ICETYPES_DEF
// Attributes of each target architecture. // Attributes of each target architecture.
// NOTES on is_elf64:
// 1- At some point NaCl would like to use ELF32 for all ILP32 sandboxes, but
//    for now the 64-bit architectures use ELF64:
//    https://code.google.com/p/nativeclient/issues/detail?id=349
//
// 2- Native code is always emitted as ELF32.
//
// TODO(jpp): set ABI e_flags for AArch64.
#define TARGETARCH_TABLE \ #define TARGETARCH_TABLE \
/* enum value, printable string, is_elf64, e_machine, e_flags */ \ /* enum value, printable string, is_elf64, e_machine, e_flags */ \
X(Target_X8632, "x86-32", false, EM_386, 0) \ X(Target_X8632, "x86-32", false, EM_386, 0) \
X(Target_X8664, "x86-64", false, EM_X86_64, 0) \ X(Target_X8664, "x86-64", true, EM_X86_64, 0) \
X(Target_ARM32, "arm32", false, EM_ARM, EF_ARM_EABI_VER5) \ X(Target_ARM32, "arm32", false, EM_ARM, EF_ARM_EABI_VER5) \
X(Target_ARM64, "arm64", true, EM_AARCH64, 0) \ X(Target_ARM64, "arm64", true, EM_AARCH64, 0) \
X(Target_MIPS32,"mips32", false, EM_MIPS, 0) \ X(Target_MIPS32,"mips32", false, EM_MIPS, 0) \
......
...@@ -3,10 +3,15 @@ ...@@ -3,10 +3,15 @@
; we try to limit to a few instructions with well known sizes and ; we try to limit to a few instructions with well known sizes and
; minimal use of registers and stack slots in the lowering sequence. ; minimal use of registers and stack slots in the lowering sequence.
; XFAIL: filtype=asm
; RUN: %p2i -i %s --sandbox --filetype=obj --disassemble --args -Om1 \ ; RUN: %p2i -i %s --sandbox --filetype=obj --disassemble --args -Om1 \
; RUN: -allow-externally-defined-symbols \ ; RUN: -allow-externally-defined-symbols \
; RUN: -ffunction-sections | FileCheck %s ; RUN: -ffunction-sections | FileCheck %s
; RUN: %p2i -i %s --sandbox --filetype=obj --disassemble --target=x8664 \
; RUN: --args -Om1 -allow-externally-defined-symbols \
; RUN: -ffunction-sections | FileCheck %s --check-prefix X8664
declare void @call_target() declare void @call_target()
@global_byte = internal global [1 x i8] zeroinitializer @global_byte = internal global [1 x i8] zeroinitializer
@global_short = internal global [2 x i8] zeroinitializer @global_short = internal global [2 x i8] zeroinitializer
...@@ -22,6 +27,10 @@ entry: ...@@ -22,6 +27,10 @@ entry:
; CHECK: nop ; CHECK: nop
; CHECK: 1b: {{.*}} call 1c ; CHECK: 1b: {{.*}} call 1c
; CHECK-NEXT: 20: ; CHECK-NEXT: 20:
; X8664-LABEL: test_direct_call
; X8664: push {{.*}}$local$__0
; X8664: jmp {{.*}} call_target
; X8664: {{0+}}20 <{{.*}}$local$__0>:
; An indirect call sequence uses the right mask and register-call sequence. ; An indirect call sequence uses the right mask and register-call sequence.
define internal void @test_indirect_call(i32 %target) { define internal void @test_indirect_call(i32 %target) {
...@@ -36,8 +45,14 @@ entry: ...@@ -36,8 +45,14 @@ entry:
; CHECK: 1b: {{.*}} and [[REG]],0xffffffe0 ; CHECK: 1b: {{.*}} and [[REG]],0xffffffe0
; CHECK-NEXT: call [[REG]] ; CHECK-NEXT: call [[REG]]
; CHECK-NEXT: 20: ; CHECK-NEXT: 20:
; X8664-LABEL: test_indirect_call
; X8664: push {{.*}}$local$__0
; X8664: {{.*}} and e[[REG:..]],0xffffffe0
; X8664: add r[[REG]],r15
; X8664: jmp r[[REG]]
; X8664: {{0+}}20 <{{.*}}$local$__0>:
; A return sequence uses the right pop / mask / jmp sequence. ; A return sequence uses the right pop / mask / jmp sequence.
define internal void @test_ret() { define internal void @test_ret() {
entry: entry:
ret void ret void
...@@ -46,6 +61,11 @@ entry: ...@@ -46,6 +61,11 @@ entry:
; CHECK: pop ecx ; CHECK: pop ecx
; CHECK-NEXT: and ecx,0xffffffe0 ; CHECK-NEXT: and ecx,0xffffffe0
; CHECK-NEXT: jmp ecx ; CHECK-NEXT: jmp ecx
; X8664-LABEL: test_ret
; X8664: pop rcx
; X8664: and ecx,0xffffffe0
; X8664: add rcx,r15
; X8664: jmp rcx
; A perfectly packed bundle should not have nops at the end. ; A perfectly packed bundle should not have nops at the end.
define internal void @packed_bundle() { define internal void @packed_bundle() {
...@@ -239,6 +259,63 @@ entry: ...@@ -239,6 +259,63 @@ entry:
; CHECK: 5b: {{.*}} and [[REG]],0xffffffe0 ; CHECK: 5b: {{.*}} and [[REG]],0xffffffe0
; CHECK-NEXT: 5e: {{.*}} call [[REG]] ; CHECK-NEXT: 5e: {{.*}} call [[REG]]
; Tests the pad_to_end bundle alignment with no padding bytes needed.
; Fixture: the lowered instructions between the two calls total exactly one
; 32-byte bundle (12 + 10 + 10 bytes per the annotations below), so the
; bundle_lock pad_to_end for the second call should emit zero nop padding.
; NOTE(review): byte counts refer to the sandboxed x86-64 lowering checked
; by the X8664 lines that follow this function — keep the instruction
; sequence exactly as-is or the expected offsets change.
define internal void @bundle_lock_pad_to_end_padding_0(i32 %arg0, i32 %arg1,
i32 %arg3, i32 %arg4,
i32 %arg5, i32 %arg6) {
call void @call_target()
; bundle boundary
%x = add i32 %arg5, %arg6 ; 12 bytes
%y = trunc i32 %x to i16 ; 10 bytes
call void @call_target() ; 10 bytes
; bundle boundary
ret void
}
; X8664-LABEL: bundle_lock_pad_to_end_padding_0$local$__0
; X8664: 56: {{.*}} push {{.*}}$local$__1
; X8664: 5b: {{.*}} jmp {{.*}} call_target
; X8664: 60: {{.*}} add
; Tests the pad_to_end bundle alignment with 11 padding bytes needed, and some
; instructions before the call.
; Fixture: the add (11 bytes) plus the call sequence (10 bytes) leave the
; second call 11 bytes short of the next 32-byte bundle boundary, so
; pad_to_end must insert 11 bytes of nops before the bundle boundary.
; NOTE(review): byte counts refer to the sandboxed x86-64 lowering checked
; by the X8664 lines that follow this function — keep the instruction
; sequence exactly as-is or the expected offsets change.
define internal void @bundle_lock_pad_to_end_padding_11(i32 %arg0, i32 %arg1,
i32 %arg3, i32 %arg4,
i32 %arg5, i32 %arg6) {
call void @call_target()
; bundle boundary
%x = add i32 %arg5, %arg6 ; 11 bytes
call void @call_target() ; 10 bytes
; 11 bytes of nop
; bundle boundary
ret void
}
; X8664-LABEL: bundle_lock_pad_to_end_padding_11$local$__0
; X8664: 4b: {{.*}} push {{.*}}$local$__1
; X8664: 50: {{.*}} jmp {{.*}} call_target
; X8664: 55: {{.*}} nop
; X8664: 5d: {{.*}} nop
; X8664: 60: {{.*}} add
; Tests the pad_to_end bundle alignment with 22 padding bytes needed, and no
; instructions before the call.
; Fixture: with no instructions between the calls, the second call sequence
; (10 bytes) needs 22 bytes of nop padding to end exactly at the next
; 32-byte bundle boundary.
; NOTE(review): byte counts refer to the sandboxed x86-64 lowering checked
; by the X8664 lines that follow this function — keep the instruction
; sequence exactly as-is or the expected offsets change.
define internal void @bundle_lock_pad_to_end_padding_22(i32 %arg0, i32 %arg1,
i32 %arg3, i32 %arg4,
i32 %arg5, i32 %arg6) {
call void @call_target()
; bundle boundary
call void @call_target() ; 10 bytes
; 22 bytes of nop
; bundle boundary
ret void
}
; X8664-LABEL: bundle_lock_pad_to_end_padding_22$local$__0
; X8664: 40: {{.*}} push {{.*}}$local$__1
; X8664: 45: {{.*}} jmp {{.*}} call_target
; X8664: 4a: {{.*}} nop
; X8664: 52: {{.*}} nop
; X8664: 5a: {{.*}} nop
; X8664: 60: {{.*}} add
; Stack adjustment state during an argument push sequence gets ; Stack adjustment state during an argument push sequence gets
; properly checkpointed and restored during the two passes, as ; properly checkpointed and restored during the two passes, as
; observed by the stack adjustment for accessing stack-allocated ; observed by the stack adjustment for accessing stack-allocated
......
...@@ -12,6 +12,18 @@ ...@@ -12,6 +12,18 @@
; RUN: --target x8632 -i %s --args -Om1 \ ; RUN: --target x8632 -i %s --args -Om1 \
; RUN: | %if --need=target_X8632 --command FileCheck --check-prefix=OPTM1 %s ; RUN: | %if --need=target_X8632 --command FileCheck --check-prefix=OPTM1 %s
; RUN: %if --need=target_X8664 --command %p2i --filetype=obj --disassemble \
; RUN: --target x8664 -i %s --args -O2 \
; RUN: | %if --need=target_X8664 --command FileCheck --check-prefix X8664 %s
; RUN: %if --need=allow_dump --need=target_X8664 --command %p2i --filetype=asm \
; RUN: --assemble --disassemble --target x8664 -i %s --args -O2 \
; RUN: | %if --need=allow_dump --need=target_X8664 \
; RUN: --command FileCheck --check-prefix=X8664 %s
; RUN: %if --need=target_X8664 --command %p2i --filetype=obj --disassemble \
; RUN: --target x8664 -i %s --args -Om1 \
; RUN: | %if --need=target_X8664 \
; RUN: --command FileCheck --check-prefix=X8664-OPTM1 %s
; TODO(jvoung): Stop skipping unimplemented parts (via --skip-unimplemented) ; TODO(jvoung): Stop skipping unimplemented parts (via --skip-unimplemented)
; once enough infrastructure is in. Also, switch to --filetype=obj ; once enough infrastructure is in. Also, switch to --filetype=obj
; when possible. ; when possible.
...@@ -55,6 +67,29 @@ entry: ...@@ -55,6 +67,29 @@ entry:
; OPTM1: call [[TARGET]] ; OPTM1: call [[TARGET]]
; OPTM1: call [[TARGET]] ; OPTM1: call [[TARGET]]
; ;
; X8664-LABEL: CallIndirect
; Use the first call as a barrier so we skip the movs in the function prolog.
; X8664: call r{{..}}
; X8664: mov e[[REG:..]],
; X8664-NEXT: call r[[REG]]
; X8664: mov e[[REG:..]],
; X8664-NEXT: call r[[REG]]
; X8664: mov e[[REG:..]],
; X8664-NEXT: call r[[REG]]
; X8664: call r{{..}}
;
; X8664-OPTM1-LABEL: CallIndirect
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
;
; ARM32-LABEL: CallIndirect ; ARM32-LABEL: CallIndirect
; ARM32: blx [[REGISTER:r.*]] ; ARM32: blx [[REGISTER:r.*]]
; ARM32: blx [[REGISTER]] ; ARM32: blx [[REGISTER]]
...@@ -90,6 +125,24 @@ entry: ...@@ -90,6 +125,24 @@ entry:
; OPTM1: call [[TARGET]] ; OPTM1: call [[TARGET]]
; OPTM1: call [[TARGET]] ; OPTM1: call [[TARGET]]
; ;
; X8664-LABEL: CallIndirectGlobal
; X8664: call r{{..}}
; X8664: mov e[[REG:..]]
; X8664-NEXT: call r[[REG]]
; X8664: mov e[[REG:..]]
; X8664-NEXT: call r[[REG]]
; X8664: call r{{..}}
;
; X8664-OPTM1-LABEL: CallIndirectGlobal
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
; X8664-OPTM1: mov e[[REG:..]],DWORD PTR
; X8664-OPTM1: call r[[REG]]
;
; ARM32-LABEL: CallIndirectGlobal ; ARM32-LABEL: CallIndirectGlobal
; ARM32: blx {{r.*}} ; ARM32: blx {{r.*}}
; ARM32: blx [[REGISTER:r[0-9]*]] ; ARM32: blx [[REGISTER:r[0-9]*]]
...@@ -118,6 +171,16 @@ entry: ...@@ -118,6 +171,16 @@ entry:
; OPTM1: e8 bc 03 01 00 call {{[0-9a-f]+}} {{.*}} R_386_PC32 *ABS* ; OPTM1: e8 bc 03 01 00 call {{[0-9a-f]+}} {{.*}} R_386_PC32 *ABS*
; OPTM1: e8 bc 03 01 00 call {{[0-9a-f]+}} {{.*}} R_386_PC32 *ABS* ; OPTM1: e8 bc 03 01 00 call {{[0-9a-f]+}} {{.*}} R_386_PC32 *ABS*
; ;
; X8664-LABEL: CallConst
; TODO(jpp): fix absolute call emission.
; These are broken: the emitted code should be
; e8 00 00 00 00 call {{.*}} *ABS*+0x103bc
;
; X8664-OPTM1-LABEL: CallConst
; TODO(jpp): fix absolute call emission.
; These are broken: the emitted code should be
; e8 00 00 00 00 call {{.*}} *ABS*+0x103bc
;
; ARM32-LABEL: CallConst ; ARM32-LABEL: CallConst
; ARM32: movw [[REGISTER:r.*]], #960 ; ARM32: movw [[REGISTER:r.*]], #960
; ARM32: movt [[REGISTER]], #1 ; ARM32: movt [[REGISTER]], #1
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment