Commit dc61925c by John Porto

Subzero. ARM32. Nonsfi.

Adds nonsfi support to the ARM32 backend. BUG= https://bugs.chromium.org/p/nativeclient/issues/detail?id=4076 R=stichnot@chromium.org Review URL: https://codereview.chromium.org/1665263003 .
parent e54e530e
......@@ -447,7 +447,6 @@ check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime
-i x8664,native,sse4.1,test_vector_ops \
-i x8664,sandbox,sse4.1,Om1 \
-i arm32,neon \
-e arm32,nonsfi \
-e arm32,neon,test_vector_ops \
-e arm32,neon,test_select
PNACL_BIN_PATH=$(PNACL_BIN_PATH) \
......
......@@ -76,15 +76,17 @@ entry:
define i32 @_Z4castIdbET0_T_(double %a) {
entry:
; %tobool = fcmp une double %a, 0.000000e+00
%tobool = fptoui double %a to i1
%tobool.ret_ext = zext i1 %tobool to i32
%tobool = fptoui double %a to i32
%tobool.i1 = trunc i32 %tobool to i1
%tobool.ret_ext = zext i1 %tobool.i1 to i32
ret i32 %tobool.ret_ext
}
define i32 @_Z4castIfbET0_T_(float %a) {
entry:
; %tobool = fcmp une float %a, 0.000000e+00
%tobool = fptoui float %a to i1
%tobool.ret_ext = zext i1 %tobool to i32
%tobool = fptoui float %a to i32
%tobool.i1 = trunc i32 %tobool to i1
%tobool.ret_ext = zext i1 %tobool.i1 to i32
ret i32 %tobool.ret_ext
}
......@@ -23,6 +23,7 @@ def Translate(ll_files, extra_args, obj, verbose):
'-O2',
'-filetype=obj',
'-bitcode-format=llvm',
'-arm-enable-dwarf-eh=1',
'-o', obj
] + extra_args, echo=verbose)
shellcmd(['le32-nacl-objcopy',
......
......@@ -177,6 +177,7 @@ def main():
obj_sz])
objs.append(obj_sz)
shellcmd(['{bin}/pnacl-llc'.format(bin=bindir),
'-arm-enable-dwarf-eh=1',
'-mtriple=' + triple,
'-externalize',
'-filetype=obj',
......@@ -237,7 +238,7 @@ def main():
compiler = '{bin}/{prefix}{cc}'.format(
bin=bindir, prefix='pnacl-',
cc='clang' if pure_c else 'clang++')
shellcmd([compiler,
shellcmd([compiler] + target_params + [
args.driver,
'-O2',
'-o', bitcode_nonfinal,
......@@ -253,6 +254,7 @@ def main():
'-disable-opt',
bitcode_nonfinal, '-S', '-o', bitcode])
shellcmd(['{bin}/pnacl-llc'.format(bin=bindir),
'-arm-enable-dwarf-eh=1',
'-mtriple=' + triple,
'-externalize',
'-filetype=obj',
......
......@@ -42,6 +42,13 @@ def RunNativePrefix(toolchain_root, target, run_cmd):
prefix = arch_map[target]
return (prefix + ' ' + run_cmd) if prefix else run_cmd
def NonsfiLoaderArch(target):
  """Returns the nonsfi_loader arch directory name for a Subzero target.

  Maps the Subzero target name ('arm32' or 'x8632') to the arch component
  of the scons-out path where nonsfi_loader is built. Raises KeyError for
  any other target.
  """
  loader_arch_by_target = {
      'arm32': 'arm',
      'x8632': 'x86-32',
  }
  return loader_arch_by_target[target]
def main():
"""Framework for cross test generation and execution.
......@@ -176,8 +183,11 @@ def main():
if sb == 'sandbox':
run_cmd = '{root}/run.py -q '.format(root=root) + run_cmd
elif sb == 'nonsfi':
run_cmd = ('{root}/scons-out/opt-linux-x86-32/obj/src/nonsfi/' +
'loader/nonsfi_loader ').format(root=root) + run_cmd
run_cmd = (
'{root}/scons-out/opt-linux-{arch}/obj/src/nonsfi/' +
'loader/nonsfi_loader ').format(
root=root, arch=NonsfiLoaderArch(target)) + run_cmd
run_cmd = RunNativePrefix(args.toolchain_root, target, run_cmd)
else:
run_cmd = RunNativePrefix(args.toolchain_root, target, run_cmd)
if args.lit:
......
......@@ -63,3 +63,4 @@ double __Sz_sitofp_i64_f64(int64_t Value) { return (double)Value; }
// memset - call @llvm.memset.p0i8.i32
// unsandboxed_irt:
// __nacl_read_tp
// __aeabi_read_tp [arm32 only]
......@@ -14,3 +14,6 @@
.text
.p2alignl 4,0xE7FEDEF0
.globl __nacl_read_tp
__nacl_read_tp:
b __aeabi_read_tp
......@@ -143,6 +143,12 @@ llvm::StringRef Assembler::getBufferView() const {
Buffer.size());
}
// Records the current buffer position in Offset. During the preliminary
// pass (getPreliminary() true) positions are presumably not yet final, so
// binding is skipped — NOTE(review): confirm against the x86 version this
// was hoisted from.
void Assembler::bindRelocOffset(RelocOffset *Offset) {
  if (getPreliminary())
    return;
  Offset->setOffset(Buffer.getPosition());
}
void Assembler::emitIASBytes(GlobalContext *Ctx) const {
Ostream &Str = Ctx->getStrEmit();
intptr_t EndPosition = Buffer.size();
......
......@@ -311,6 +311,8 @@ public:
return Buffer.createTextFixup(Text, BytesUsed);
}
void bindRelocOffset(RelocOffset *Offset);
void setNeedsTextFixup() { Buffer.setNeedsTextFixup(); }
void resetNeedsTextFixup() { Buffer.resetNeedsTextFixup(); }
......
......@@ -609,12 +609,13 @@ size_t MoveRelocatableFixup::emit(GlobalContext *Ctx,
return InstARM32::InstSize;
Ostream &Str = Ctx->getStrEmit();
IValueT Inst = Asm.load<IValueT>(position());
const bool IsMovw = kind() == llvm::ELF::R_ARM_MOVW_ABS_NC ||
kind() == llvm::ELF::R_ARM_MOVW_PREL_NC;
Str << "\t"
"mov" << (kind() == llvm::ELF::R_ARM_MOVW_ABS_NC ? "w" : "t") << "\t"
"mov" << (IsMovw ? "w" : "t") << "\t"
<< RegARM32::getRegName(RegNumT::fixme((Inst >> kRdShift) & 0xF))
<< ", #:" << (kind() == llvm::ELF::R_ARM_MOVW_ABS_NC ? "lower" : "upper")
<< "16:" << symbol(Ctx, &Asm) << "\t@ .word "
<< llvm::format_hex_no_prefix(Inst, 8) << "\n";
<< ", #:" << (IsMovw ? "lower" : "upper") << "16:" << symbol(Ctx, &Asm)
<< "\t@ .word " << llvm::format_hex_no_prefix(Inst, 8) << "\n";
return InstARM32::InstSize;
}
......@@ -625,8 +626,7 @@ void MoveRelocatableFixup::emitOffset(Assembler *Asm) const {
const IValueT Inst = Asm->load<IValueT>(position());
constexpr IValueT Imm16Mask = 0x000F0FFF;
const IValueT Imm16 =
offset() >> (kind() == llvm::ELF::R_ARM_MOVW_ABS_NC ? 0 : 16) & 0xffff;
const IValueT Imm16 = offset() & 0xffff;
Asm->store(position(),
(Inst & ~Imm16Mask) | ((Imm16 >> 12) << 16) | (Imm16 & 0xfff));
}
......@@ -635,8 +635,10 @@ MoveRelocatableFixup *AssemblerARM32::createMoveFixup(bool IsMovW,
const Constant *Value) {
MoveRelocatableFixup *F =
new (allocate<MoveRelocatableFixup>()) MoveRelocatableFixup();
F->set_kind(IsMovW ? llvm::ELF::R_ARM_MOVW_ABS_NC
: llvm::ELF::R_ARM_MOVT_ABS);
F->set_kind(IsMovW ? (IsNonsfi ? llvm::ELF::R_ARM_MOVW_PREL_NC
: llvm::ELF::R_ARM_MOVW_ABS_NC)
: (IsNonsfi ? llvm::ELF::R_ARM_MOVT_PREL
: llvm::ELF::R_ARM_MOVT_ABS));
F->set_value(Value);
Buffer.installFixup(F);
return F;
......
......@@ -95,8 +95,8 @@ public:
const RegNumT FrameOrStackReg;
};
explicit AssemblerARM32(bool use_far_branches = false)
: Assembler(Asm_ARM32) {
explicit AssemblerARM32(bool IsNonsfi, bool use_far_branches = false)
: Assembler(Asm_ARM32), IsNonsfi(IsNonsfi) {
// TODO(kschimpf): Add mode if needed when branches are handled.
(void)use_far_branches;
}
......@@ -551,6 +551,8 @@ public:
private:
ENABLE_MAKE_UNIQUE;
const bool IsNonsfi;
// A vector of pool-allocated x86 labels for CFG nodes.
using LabelVector = std::vector<Label *>;
LabelVector CfgNodeLabels;
......
......@@ -167,7 +167,6 @@ public:
Label *getOrCreateCfgNodeLabel(SizeT Number);
Label *getOrCreateLocalLabel(SizeT Number);
void bindLocalLabel(SizeT Number);
void bindRelocOffset(RelocOffset *Offset);
bool fixupIsPCRel(FixupKind Kind) const override {
// Currently assuming this is the only PC-rel relocation type used.
......
......@@ -106,13 +106,6 @@ void AssemblerX86Base<TraitsType>::bindLocalLabel(SizeT Number) {
}
template <typename TraitsType>
void AssemblerX86Base<TraitsType>::bindRelocOffset(RelocOffset *Offset) {
if (!getPreliminary()) {
Offset->setOffset(Buffer.getPosition());
}
}
template <typename TraitsType>
void AssemblerX86Base<TraitsType>::call(GPRRegister reg) {
AssemblerBuffer::EnsureCapacity ensured(&Buffer);
emitRexB(RexTypeIrrelevant, reg);
......
......@@ -157,6 +157,19 @@ public:
}
/// @}
/// \name Manage the Globals used by this function.
/// @{
std::unique_ptr<VariableDeclarationList> getGlobalInits() {
return std::move(GlobalInits);
}
void addGlobal(VariableDeclaration *Global) {
if (GlobalInits == nullptr) {
GlobalInits.reset(new VariableDeclarationList);
}
GlobalInits->push_back(Global);
}
/// @}
/// \name Miscellaneous accessors.
/// @{
TargetLowering *getTarget() const { return Target.get(); }
......@@ -166,9 +179,6 @@ public:
return llvm::dyn_cast<T>(TargetAssembler.get());
}
Assembler *releaseAssembler() { return TargetAssembler.release(); }
std::unique_ptr<VariableDeclarationList> getGlobalInits() {
return std::move(GlobalInits);
}
bool hasComputedFrame() const;
bool getFocusedTiming() const { return FocusedTiming; }
void setFocusedTiming() { FocusedTiming = true; }
......
......@@ -22,9 +22,9 @@
#include "IceGlobalInits.h"
#include "IceInst.h"
#include "IceOperand.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm::ELF;
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
namespace Ice {
......@@ -419,11 +419,13 @@ void ELFObjectWriter::writeDataOfType(SectionType ST,
Section->appendZeros(Str, Init->getNumBytes());
break;
case VariableDeclaration::Initializer::RelocInitializerKind: {
const auto Reloc =
const auto *Reloc =
llvm::cast<VariableDeclaration::RelocInitializer>(Init.get());
AssemblerFixup NewFixup;
NewFixup.set_position(Section->getCurrentSize());
NewFixup.set_kind(RelocationKind);
NewFixup.set_kind(Reloc->hasFixup() ? Reloc->getFixup()
: RelocationKind);
assert(NewFixup.kind() != llvm::ELF::R_ARM_NONE);
constexpr bool SuppressMangling = true;
NewFixup.set_value(Ctx.getConstantSym(
Reloc->getOffset(), Reloc->getDeclaration()->mangleName(&Ctx),
......
......@@ -20,6 +20,7 @@
#define SUBZERO_SRC_ICEGLOBALINITS_H
#include "IceDefs.h"
#include "IceFixups.h"
#include "IceGlobalContext.h"
#include "IceIntrinsics.h"
#include "IceOperand.h"
......@@ -321,7 +322,16 @@ public:
static std::unique_ptr<RelocInitializer>
create(const GlobalDeclaration *Declaration,
const RelocOffsetArray &OffsetExpr) {
return makeUnique<RelocInitializer>(Declaration, OffsetExpr);
constexpr bool NoFixup = false;
return makeUnique<RelocInitializer>(Declaration, OffsetExpr, NoFixup);
}
static std::unique_ptr<RelocInitializer>
create(const GlobalDeclaration *Declaration,
const RelocOffsetArray &OffsetExpr, FixupKind Fixup) {
constexpr bool HasFixup = true;
return makeUnique<RelocInitializer>(Declaration, OffsetExpr, HasFixup,
Fixup);
}
RelocOffsetT getOffset() const {
......@@ -332,6 +342,12 @@ public:
return Offset;
}
bool hasFixup() const { return HasFixup; }
FixupKind getFixup() const {
assert(HasFixup);
return Fixup;
}
const GlobalDeclaration *getDeclaration() const { return Declaration; }
SizeT getNumBytes() const final { return RelocAddrSize; }
void dump(GlobalContext *Ctx, Ostream &Stream) const final;
......@@ -344,14 +360,17 @@ public:
ENABLE_MAKE_UNIQUE;
RelocInitializer(const GlobalDeclaration *Declaration,
const RelocOffsetArray &OffsetExpr)
const RelocOffsetArray &OffsetExpr, bool HasFixup,
FixupKind Fixup = 0)
: Initializer(RelocInitializerKind),
Declaration(Declaration), // The global declaration used in the reloc.
OffsetExpr(OffsetExpr) {}
OffsetExpr(OffsetExpr), HasFixup(HasFixup), Fixup(Fixup) {}
const GlobalDeclaration *Declaration;
/// The offset to add to the relocation.
const RelocOffsetArray OffsetExpr;
const bool HasFixup = false;
const FixupKind Fixup = 0;
};
/// Models the list of initializers.
......
......@@ -354,7 +354,8 @@ OperandARM32FlexFpImm::OperandARM32FlexFpImm(Cfg * /*Func*/, Type Ty,
uint32_t ModifiedImm)
: OperandARM32Flex(kFlexFpImm, Ty), ModifiedImm(ModifiedImm) {}
bool OperandARM32FlexFpImm::canHoldImm(Operand *C, uint32_t *ModifiedImm) {
bool OperandARM32FlexFpImm::canHoldImm(const Operand *C,
uint32_t *ModifiedImm) {
switch (C->getType()) {
default:
llvm::report_fatal_error("Unhandled fp constant type.");
......@@ -369,7 +370,7 @@ bool OperandARM32FlexFpImm::canHoldImm(Operand *C, uint32_t *ModifiedImm) {
static constexpr uint32_t AllowedBits = a | B | bbbbb | cdefgh;
static_assert(AllowedBits == 0xFFF80000u,
"Invalid mask for f32 modified immediates.");
const float F32 = llvm::cast<ConstantFloat>(C)->getValue();
const float F32 = llvm::cast<const ConstantFloat>(C)->getValue();
const uint32_t I32 = Utils::bitCopy<uint32_t>(F32);
if (I32 & ~AllowedBits) {
// constant has disallowed bits.
......@@ -398,7 +399,7 @@ bool OperandARM32FlexFpImm::canHoldImm(Operand *C, uint32_t *ModifiedImm) {
static constexpr uint32_t AllowedBits = a | B | bbbbbbbb | cdefgh;
static_assert(AllowedBits == 0xFFFF0000u,
"Invalid mask for f64 modified immediates.");
const double F64 = llvm::cast<ConstantDouble>(C)->getValue();
const double F64 = llvm::cast<const ConstantDouble>(C)->getValue();
const uint64_t I64 = Utils::bitCopy<uint64_t>(F64);
if (I64 & 0xFFFFFFFFu) {
// constant has disallowed bits.
......@@ -1725,6 +1726,9 @@ void InstARM32Label::emit(const Cfg *Func) const {
void InstARM32Label::emitIAS(const Cfg *Func) const {
auto *Asm = Func->getAssembler<ARM32::AssemblerARM32>();
Asm->bindLocalLabel(Func, this, Number);
if (OffsetReloc != nullptr) {
Asm->bindRelocOffset(OffsetReloc);
}
if (Asm->needsTextFixup())
emitUsingTextFixup(Func);
}
......@@ -1867,6 +1871,9 @@ template <> void InstARM32Movw::emit(const Cfg *Func) const {
if (auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src0)) {
Str << "#:lower16:";
CR->emitWithoutPrefix(Func->getTarget());
if (Func->getContext()->getFlags().getUseNonsfi()) {
Str << " - .";
}
} else {
Src0->emit(Func);
}
......@@ -1893,6 +1900,9 @@ template <> void InstARM32Movt::emit(const Cfg *Func) const {
if (auto *CR = llvm::dyn_cast<ConstantRelocatable>(Src1)) {
Str << "#:upper16:";
CR->emitWithoutPrefix(Func->getTarget());
if (Func->getContext()->getFlags().getUseNonsfi()) {
Str << " - .";
}
} else {
Src1->emit(Func);
}
......
......@@ -265,7 +265,7 @@ public:
return Operand->getKind() == static_cast<OperandKind>(kFlexFpImm);
}
static bool canHoldImm(Operand *C, uint32_t *ModifiedImm);
static bool canHoldImm(const Operand *C, uint32_t *ModifiedImm);
uint32_t getModifiedImm() const { return ModifiedImm; }
......@@ -973,10 +973,13 @@ public:
void emit(const Cfg *Func) const override;
void emitIAS(const Cfg *Func) const override;
void dump(const Cfg *Func) const override;
void setRelocOffset(RelocOffset *Value) { OffsetReloc = Value; }
private:
InstARM32Label(Cfg *Func, TargetARM32 *Target);
RelocOffset *OffsetReloc = nullptr;
SizeT Number; // used for unique label generation.
};
......
......@@ -556,6 +556,10 @@ void ConstantRelocatable::emitWithoutPrefix(const TargetLowering *Target,
void ConstantRelocatable::dump(const Cfg *Func, Ostream &Str) const {
if (!BuildDefs::dump())
return;
if (!EmitString.empty()) {
Str << EmitString;
return;
}
Str << "@";
if (Func && !SuppressMangling) {
Str << Func->getContext()->mangleName(Name);
......
......@@ -843,6 +843,10 @@ void TargetDataLowering::emitGlobal(const VariableDeclaration &Var,
llvm::cast<VariableDeclaration::RelocInitializer>(Init.get());
Str << "\t" << getEmit32Directive() << "\t";
Str << Reloc->getDeclaration()->mangleName(Ctx);
if (Reloc->hasFixup()) {
// TODO(jpp): this is ARM32 specific.
Str << "(GOTOFF)";
}
if (RelocOffsetT Offset = Reloc->getOffset()) {
if (Offset >= 0 || (Offset == INT32_MIN))
Str << " + " << Offset;
......
......@@ -24,6 +24,8 @@
#include "llvm/ADT/SmallBitVector.h"
#include <unordered_set>
namespace Ice {
namespace ARM32 {
......@@ -64,7 +66,8 @@ public:
}
std::unique_ptr<::Ice::Assembler> createAssembler() const override {
return makeUnique<ARM32::AssemblerARM32>();
const bool IsNonsfi = SandboxingType == ST_Nonsfi;
return makeUnique<ARM32::AssemblerARM32>(IsNonsfi);
}
void initNodeForLowering(CfgNode *Node) override {
......@@ -856,6 +859,48 @@ protected:
void postLowerLegalization();
/// Manages the GotPtr variable, which is used for Nonsfi sandboxing.
/// @{
void createGotPtr();
void insertGotPtrInitPlaceholder();
VariableDeclaration *createGotRelocation(RelocOffset *AddPcReloc);
void materializeGotAddr(CfgNode *Node);
Variable *GotPtr = nullptr;
// TODO(jpp): use CfgLocalAllocator.
/// @}
/// Manages the Gotoff relocations created during the function lowering. A
/// single Gotoff relocation is created for each global variable used by the
/// function being lowered.
/// @{
// TODO(jpp): if the same global G is used in different functions, then this
// method will emit one G(gotoff) relocation per function.
IceString createGotoffRelocation(const ConstantRelocatable *CR);
std::unordered_set<IceString> KnownGotoffs;
/// @}
/// Loads the constant relocatable Name to Register. Then invoke Finish to
/// finish the relocatable lowering. Finish **must** use PC in its first
/// emitted instruction, or the relocatable in Register will contain the wrong
/// value.
//
// Lowered sequence:
//
// Movw:
// movw Register, #:lower16:Name - (End - Movw) - 8 .
// Movt:
// movt Register, #:upper16:Name - (End - Movt) - 8 .
// PC = fake-def
// End:
// Finish(PC)
//
// The -8 in movw/movt above is to account for the PC value that the first
// instruction emitted by Finish(PC) will read.
void loadNamedConstantRelocatablePIC(const IceString &Name,
Variable *Register,
std::function<void(Variable *PC)> Finish,
bool SuppressMangling = true);
/// Sandboxer defines methods for ensuring that "dangerous" operations are
/// masked during sandboxed code emission. For regular, non-sandboxed code
/// emission, its methods are simple pass-through methods.
......
; RUN: %p2i -i %s --filetype=obj --assemble --disassemble --args -O2 -nonsfi=1 \
; RUN: --ffunction-sections \
; RUN: %p2i -i %s --target=x8632 --filetype=obj --assemble --disassemble \
; RUN: --args -O2 -nonsfi=1 --ffunction-sections \
; RUN: | FileCheck --check-prefix=NONSFI %s
; RUN: %p2i -i %s --filetype=obj --assemble --disassemble --args -O2 -nonsfi=0 \
; RUN: --ffunction-sections \
; RUN: %p2i -i %s --target=x8632 --filetype=obj --assemble --disassemble \
; RUN: --args -O2 -nonsfi=0 --ffunction-sections \
; RUN: | FileCheck --check-prefix=DEFAULT %s
; RUN: %p2i -i %s --target=arm32 --filetype=obj --assemble --disassemble \
; RUN: --args -O2 -nonsfi=1 --ffunction-sections \
; RUN: | FileCheck --check-prefix=ARM32-NONSFI %s
@G1 = internal global [4 x i8] zeroinitializer, align 4
@G2 = internal global [4 x i8] zeroinitializer, align 4
......@@ -20,6 +24,13 @@ entry:
; NONSFI: call {{.*}} R_386_PC32 {{.*}}testLoadBasic
; DEFAULT-LABEL: testCallRegular
; ARM32-NONSFI-LABEL: testCallRegular
; ARM32-NONSFI: movw [[REG:r[0-9]+]], {{.*}} R_ARM_MOVW_PREL_NC GOTOFF{{.*}}testLoadBasic
; ARM32-NONSFI-NEXT: movt [[REG:r[0-9]+]], {{.*}} R_ARM_MOVT_PREL GOTOFF{{.*}}testLoadBasic
; ARM32-NONSFI-NEXT: ldr [[GOTOFF:r[0-9]+]], [pc, [[REG]]]
; ARM32-NONSFI-NEXT: add [[CT:r[0-9]+]], {{.*}}, [[CT]]
; ARM32-NONSFI: blx [[CT]]
define internal double @testCallBuiltin(double %val) {
entry:
%result = frem double %val, %val
......@@ -30,6 +41,13 @@ entry:
; NONSFI: call {{.*}} R_386_PC32 fmod
; DEFAULT-LABEL: testCallBuiltin
; ARM32-NONSFI-LABEL: testCallBuiltin
; ARM32-NONSFI: movw [[REG:r[0-9]+]], {{.*}} R_ARM_MOVW_PREL_NC GOTOFF{{.*}}fmod
; ARM32-NONSFI-NEXT: movt [[REG:r[0-9]+]], {{.*}} R_ARM_MOVT_PREL GOTOFF{{.*}}fmod
; ARM32-NONSFI-NEXT: ldr [[GOTOFF:r[0-9]+]], [pc, [[REG]]]
; ARM32-NONSFI-NEXT: add [[CT:r[0-9]+]], {{.*}}, [[CT]]
; ARM32-NONSFI: blx [[CT]]
define internal i32 @testLoadBasic() {
entry:
%a = bitcast [4 x i8]* @G1 to i32*
......@@ -41,6 +59,14 @@ entry:
; NONSFI: mov {{.*}} R_386_GOTOFF {{G1|.bss}}
; DEFAULT-LABEL: testLoadBasic
; ARM32 PIC load.
; ARM32-NONSFI-LABEL: testLoadBasic
; ARM32-NONSFI: movw {{.*}} R_ARM_MOVW_PREL_NC _GLOBAL_OFFSET_TABLE_
; ARM32-NONSFI-NEXT: movt {{.*}} R_ARM_MOVT_PREL _GLOBAL_OFFSET_TABLE_
; ARM32-NONSFI: movw [[REG:r[0-9]+]], {{.*}} R_ARM_MOVW_PREL_NC {{.*}}G1
; ARM32-NONSFI-NEXT: movt [[REG]], {{.*}} R_ARM_MOVT_PREL {{.*}}G1
; ARM32-NONSFI-NEXT: ldr r{{[0-9]+}}, [pc, [[REG]]]
define internal i32 @testLoadFixedOffset() {
entry:
%a = ptrtoint [4 x i8]* @G1 to i32
......@@ -54,6 +80,15 @@ entry:
; NONSFI: mov {{.*}}+0x4] {{.*}} R_386_GOTOFF {{G1|.bss}}
; DEFAULT-LABEL: testLoadFixedOffset
; ARM32-NONSFI-LABEL: testLoadFixedOffset
; ARM32-NONSFI: movw [[GOT:r[0-9]+]], {{.*}} R_ARM_MOVW_PREL_NC _GLOBAL_OFFSET_TABLE_
; ARM32-NONSFI-NEXT: movt [[GOT]], {{.*}} R_ARM_MOVT_PREL _GLOBAL_OFFSET_TABLE_
; ARM32-NONSFI: movw [[REG:r[0-9]+]], {{.*}} R_ARM_MOVW_PREL_NC {{.*}}G1
; ARM32-NONSFI-NEXT: movt [[REG]], {{.*}} R_ARM_MOVT_PREL {{.*}}G1
; ARM32-NONSFI-NEXT: ldr [[ADDR:r[0-9]+]], [pc, [[REG]]]
; ARM32-NONSFI-NEXT: add [[G1BASE:r[0-9]+]], [[GOT]], [[ADDR]]
; ARM32-NONSFI-NEXT: add {{.*}}, [[G1BASE]], #4
define internal i32 @testLoadIndexed(i32 %idx) {
entry:
%a = ptrtoint [4 x i8]* @G1 to i32
......@@ -70,6 +105,15 @@ entry:
; NONSFI: mov {{.*}}*4+0xc] {{.*}} R_386_GOTOFF {{G1|.bss}}
; DEFAULT-LABEL: testLoadIndexed
; ARM32-NONSFI-LABEL: testLoadIndexed
; ARM32-NONSFI: movw [[GOT:r[0-9]+]], {{.*}} R_ARM_MOVW_PREL_NC _GLOBAL_OFFSET_TABLE_
; ARM32-NONSFI-NEXT: movt [[GOT]], {{.*}} R_ARM_MOVT_PREL _GLOBAL_OFFSET_TABLE_
; ARM32-NONSFI: movw [[REG:r[0-9]+]], {{.*}} R_ARM_MOVW_PREL_NC {{.*}}G1
; ARM32-NONSFI-NEXT: movt [[REG]], {{.*}} R_ARM_MOVT_PREL {{.*}}G1
; ARM32-NONSFI-NEXT: ldr [[ADDR:r[0-9]+]], [pc, [[REG]]]
; ARM32-NONSFI-NEXT: add [[G1BASE:r[0-9]+]], [[GOT]], [[ADDR]]
; ARM32-NONSFI-NEXT: add {{.*}}, [[G1BASE]]
define internal i32 @testLoadIndexedBase(i32 %base, i32 %idx) {
entry:
%a = ptrtoint [4 x i8]* @G1 to i32
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment