Commit 52b51571 by John Porto

Subzero. ARM32. Implements sandboxing.

(See https://codereview.chromium.org/1491473002/ for the steps taken while implementing sandboxing.) BUG= https://code.google.com/p/nativeclient/issues/detail?id=4076 R=stichnot@chromium.org Review URL: https://codereview.chromium.org/1499983002 .
parent dca86741
......@@ -402,9 +402,9 @@ check-xtest: $(OBJDIR)/pnacl-sz make_symlink runtime
-e x8664,native,sse2 \
-e x8664,native,sse4.1,test_vector_ops \
-e x8664,native,sse2,test_global \
-i arm32,native,neon \
-e arm32,native,neon,test_vector_ops \
-e arm32,native,neon,test_select
-i arm32,neon \
-e arm32,neon,test_vector_ops \
-e arm32,neon,test_select
PNACL_BIN_PATH=$(PNACL_BIN_PATH) \
$(LLVM_SRC_PATH)/utils/lit/lit.py -sv crosstest/Output
endif
......
......@@ -182,7 +182,7 @@ def main():
).format(root=nacl_root, sb='sb' if args.sandbox else 'native'))
pure_c = os.path.splitext(args.driver)[1] == '.c'
# TargetX8664 is ilp32, but clang does not currently support such
# TargetX8664 is ilp32, but pnacl-clang does not currently support such
# configuration. In order to run the crosstests we play nasty, dangerous
# tricks with the stack pointer.
needs_stack_hack = (args.target == 'x8664')
......@@ -202,7 +202,7 @@ def main():
bin=bindir, prefix='pnacl-' if args.sandbox else '',
cc='clang' if pure_c else 'clang++')
sb_native_args = (['-O0', '--pnacl-allow-native',
'-arch', target_info.target,
'-arch', target_info.compiler_arch,
'-Wn,-defsym=__Sz_AbsoluteZero=0']
if args.sandbox else
['-g', '-target=' + triple,
......
......@@ -66,8 +66,9 @@ def main():
flat_attrs += v
arch_flags = { 'x8632': [],
'x8664': [],
# ARM doesn't have an ELF writer yet.
'arm32': ['--filetype=iasm'] }
# ARM doesn't have an ELF writer yet, and iasm does not
# support sandboxing yet.
'arm32': ['--filetype=asm'] }
# all_keys is only used in the help text.
all_keys = '; '.join([' '.join(targets), ' '.join(sandboxing),
' '.join(opt_levels), ' '.join(flat_attrs)])
......
......@@ -11,14 +11,13 @@ import tempfile
from utils import shellcmd
def TargetAssemblerFlags(target):
# TODO(stichnot): -triple=i686-nacl should be used for a
# sandboxing test. This means there should be an args.sandbox
# argument that also gets passed through to pnacl-sz.
def TargetAssemblerFlags(target, sandboxed):
    # TODO(reed kotler): Need to find out exactly what we need to
    # add here for Mips32.
flags = { 'x8632': ['-triple=i686'],
'arm32': ['-triple=armv7a', '-mcpu=cortex-a9', '-mattr=+neon'],
flags = { 'x8632': ['-triple=%s' % ('i686-nacl' if sandboxed else 'i686')],
'arm32': ['-triple=%s' % (
'armv7a-nacl' if sandboxed else 'armv7a'),
'-mcpu=cortex-a9', '-mattr=+neon'],
'mips32': ['-triple=mipsel' ] }
return flags[target]
......@@ -89,6 +88,8 @@ def main():
argparser.add_argument('--args', '-a', nargs=argparse.REMAINDER,
default=[],
help='Remaining arguments are passed to pnacl-sz')
argparser.add_argument('--sandbox', required=False, action='store_true',
help='Sandboxes the generated code.')
args = argparser.parse_args()
pnacl_bin_path = args.pnacl_bin_path
......@@ -121,6 +122,8 @@ def main():
cmd += [os.path.join(pnacl_bin_path, 'not')]
cmd += [args.pnacl_sz]
cmd += ['--target', args.target]
if args.sandbox:
cmd += ['-sandbox']
if args.insts:
# If the tests are based on '-verbose inst' output, force
# single-threaded translation because dump output does not get
......@@ -147,7 +150,7 @@ def main():
asm_temp.close()
if args.assemble and args.filetype != 'obj':
cmd += (['|', os.path.join(pnacl_bin_path, 'llvm-mc')] +
TargetAssemblerFlags(args.target) +
TargetAssemblerFlags(args.target, args.sandbox) +
['-filetype=obj', '-o', asm_temp.name])
elif asm_temp:
cmd += ['-o', asm_temp.name]
......
......@@ -318,10 +318,15 @@ def ProcessPexe(args, pexe, exe):
# Run the linker regardless of hybrid mode.
if args.sandbox:
assert args.target in ['x8632'], \
assert args.target in ('x8632', 'arm32'), \
'-sandbox is not available for %s' % args.target
target_lib_dir = {
'arm32': 'arm',
'x8632': 'x86-32',
}[args.target]
linklib = ('{root}/toolchain/linux_x86/pnacl_newlib_raw/translator/' +
'x86-32/lib').format(root=nacl_root)
'{target_dir}/lib').format(root=nacl_root,
target_dir=target_lib_dir)
shellcmd((
'{gold} -nostdlib --no-fix-cortex-a8 --eh-frame-hdr -z text ' +
'--build-id --entry=__pnacl_start -static ' +
......
......@@ -17,22 +17,25 @@ def FindARMCrossInclude():
TargetInfo = namedtuple('TargetInfo',
['target', 'triple', 'llc_flags', 'ld_emu',
'cross_headers'])
['target', 'compiler_arch', 'triple', 'llc_flags',
'ld_emu', 'cross_headers'])
X8632Target = TargetInfo(target='x8632',
compiler_arch='x8632',
triple='i686-none-linux',
llc_flags=['-mcpu=pentium4m'],
ld_emu='elf_i386_nacl',
cross_headers=[])
X8664Target = TargetInfo(target='x8664',
compiler_arch='x8664',
triple='x86_64-none-linux',
llc_flags=['-mcpu=x86-64'],
ld_emu='elf_x86_64_nacl',
cross_headers=[])
ARM32Target = TargetInfo(target='arm32',
compiler_arch='armv7',
triple='armv7a-none-linux-gnueabihf',
llc_flags=['-mcpu=cortex-a9',
'-float-abi=hard',
......
......@@ -822,6 +822,131 @@ protected:
void postLowerLegalization();
/// Sandboxer defines methods for ensuring that "dangerous" operations are
/// masked during sandboxed code emission. For regular, non-sandboxed code
/// emission, its methods are simple pass-through methods.
///
/// The Sandboxer also emits BundleLock/BundleUnlock pseudo-instructions
/// in the constructor/destructor during sandboxed code emission. Therefore,
/// it is a bad idea to create an object of this type and "keep it around."
/// The recommended usage is:
///
/// Sandboxer(this).<<operation>>(...);
///
/// This usage ensures that no other instructions are inadvertently added to
/// the bundle.
class Sandboxer {
Sandboxer() = delete;
Sandboxer(const Sandboxer &) = delete;
Sandboxer &operator=(const Sandboxer &) = delete;
public:
explicit Sandboxer(
TargetARM32 *Target,
InstBundleLock::Option BundleOption = InstBundleLock::Opt_None);
~Sandboxer();
/// Increments sp:
///
/// add sp, sp, AddAmount
/// bic sp, sp, 0xc0000000
///
/// (for the rationale, see the ARM 32-bit Sandbox Specification.)
void add_sp(Operand *AddAmount);
/// Emits code to align sp to the specified alignment:
///
/// bic/and sp, sp, Alignment
/// bic sp, sp, 0xc0000000
void align_sp(size_t Alignment);
/// Emits a call instruction. If CallTarget is a Variable, it emits
///
/// bic CallTarget, CallTarget, 0xc000000f
/// bl CallTarget
///
/// Otherwise, it emits
///
/// bl CallTarget
///
/// Note: in sandboxed code calls are always emitted at addresses 12 mod 16.
InstARM32Call *bl(Variable *ReturnReg, Operand *CallTarget);
/// Emits a load:
///
/// bic rBase, rBase, 0xc0000000
/// ldr rDest, [rBase, #Offset]
///
/// Exception: if rBase is r9 or sp, then the load is emitted as:
///
/// ldr rDest, [rBase, #Offset]
///
/// because the NaCl ARM 32-bit Sandbox Specification guarantees they are
/// always valid.
void ldr(Variable *Dest, OperandARM32Mem *Mem, CondARM32::Cond Pred);
/// Emits a load exclusive:
///
/// bic rBase, rBase, 0xc0000000
/// ldrex rDest, [rBase]
///
/// Exception: if rBase is r9 or sp, then the load is emitted as:
///
/// ldrex rDest, [rBase]
///
/// because the NaCl ARM 32-bit Sandbox Specification guarantees they are
/// always valid.
void ldrex(Variable *Dest, OperandARM32Mem *Mem, CondARM32::Cond Pred);
/// Resets sp to Src:
///
/// mov sp, Src
/// bic sp, sp, 0xc0000000
void reset_sp(Variable *Src);
/// Emits code to return from a function:
///
/// bic lr, lr, 0xc000000f
/// bx lr
void ret(Variable *RetAddr, Variable *RetValue);
/// Emits a store:
///
/// bic rBase, rBase, 0xc0000000
/// str rSrc, [rBase, #Offset]
///
/// Exception: if rBase is r9 or sp, then the store is emitted as:
///
/// str rSrc, [rBase, #Offset]
///
/// because the NaCl ARM 32-bit Sandbox Specification guarantees they are
/// always valid.
void str(Variable *Src, OperandARM32Mem *Mem, CondARM32::Cond Pred);
/// Emits a store exclusive:
///
/// bic rBase, rBase, 0xc0000000
/// strex rDest, rSrc, [rBase]
///
/// Exception: if rBase is r9 or sp, then the store is emitted as:
///
/// strex rDest, rSrc, [rBase]
///
/// because the NaCl ARM 32-bit Sandbox Specification guarantees they are
/// always valid.
void strex(Variable *Dest, Variable *Src, OperandARM32Mem *Mem,
CondARM32::Cond Pred);
/// Decrements sp:
///
/// sub sp, sp, SubAmount
/// bic sp, sp, 0xc0000000
void sub_sp(Operand *SubAmount);
private:
/// The TargetARM32 this Sandboxer emits instructions through (set by the
/// constructor).
TargetARM32 *Target;
};
class PostLoweringLegalizer {
PostLoweringLegalizer() = delete;
PostLoweringLegalizer(const PostLoweringLegalizer &) = delete;
......@@ -878,6 +1003,7 @@ protected:
int32_t TempBaseOffset = 0;
};
const bool NeedSandboxing;
TargetARM32Features CPUFeatures;
bool UsesFramePointer = false;
bool NeedsStackAlignment = false;
......
; Tests basics and corner cases of arm32 sandboxing, using -Om1 in the hope that
; the output will remain stable. When packing bundles, we try to limit to a few
; instructions with well known sizes and minimal use of registers and stack
; slots in the lowering sequence.
; REQUIRES: allow_dump, target_arm32
; RUN: %p2i -i %s --sandbox --filetype=asm --target=arm32 --assemble \
; RUN: --disassemble --args -Om1 -allow-externally-defined-symbols \
; RUN: -ffunction-sections | FileCheck %s
declare void @call_target()
declare void @call_target1(i32 %arg)
@global_short = internal global [2 x i8] zeroinitializer
; A direct call sequence uses the right mask and register-call sequence.
define internal void @test_direct_call() {
entry:
call void @call_target()
ret void
}
; CHECK-LABEL: test_direct_call
; CHECK: sub sp,
; CHECK-NEXT: bic sp, sp, {{.*}} ; 0xc0000000
; CHECK: {{[0-9]*}}c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: {{[0-9]*}}0:
; An indirect call sequence uses the right mask and register-call sequence.
define internal void @test_indirect_call(i32 %target) {
entry:
%__1 = inttoptr i32 %target to void ()*
call void %__1()
ret void
}
; CHECK-LABEL: test_indirect_call
; CHECK: sub sp,
; CHECK: bic sp, sp, {{.*}} ; 0xc0000000
; CHECK-NOT: bic sp, sp, {{.*}} ; 0xc0000000
; CHECK: ldr [[REG:r[0-9]+]], [sp,
; CHECK-NEXT: nop
; CHECK: {{[0-9]+}}8: {{.*}} bic [[REG:r[0-9]+]], [[REG]], {{.*}} 0xc000000f
; CHECK-NEXT: blx [[REG]]
; CHECK-NEXT: {{[0-9]+}}0:
; A return sequence uses the right pop / mask / jmp sequence.
define internal void @test_ret() {
entry:
ret void
}
; CHECK-LABEL: test_ret
; CHECK: 0: {{.*}} bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: bx lr
; Bundle lock without padding.
define internal void @bundle_lock_without_padding() {
entry:
%addr_short = bitcast [2 x i8]* @global_short to i16*
store i16 0, i16* %addr_short, align 1
ret void
}
; CHECK-LABEL: bundle_lock_without_padding
; CHECK: 0: {{.*}} movw
; CHECK-NEXT: movt
; CHECK-NEXT: movw
; CHECK-NEXT: nop
; CHECK-NEXT: bic [[REG:r[0-9]+]], {{.*}} 0xc0000000
; CHECK-NEXT: strh {{.*}}, {{[[]}}[[REG]]
; CHECK-NEXT: bic lr, lr, {{.*}} ; 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock with padding.
define internal void @bundle_lock_with_padding() {
entry:
call void @call_target()
; bundle boundary
store i16 0, i16* undef, align 1 ; 3 insts
store i16 0, i16* undef, align 1 ; 3 insts
store i16 0, i16* undef, align 1 ; 3 insts
; SP adjustment + pop
; nop
; bundle boundary
ret void
}
; CHECK-LABEL: bundle_lock_with_padding
; CHECK: 48: {{.*}} pop
; CHECK-NEXT: nop
; CHECK-NEXT: bic lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock align_to_end without any padding.
define internal void @bundle_lock_align_to_end_padding_0() {
entry:
call void @call_target()
; bundle boundary
store i16 0, i16* undef, align 1
call void @call_target()
; bundle boundary
ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_0
; CHECK: c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: movw
; CHECK-NEXT: movw
; CHECK-NEXT: bic [[REG:r[0-9]+]]
; CHECK-NEXT: strh {{.*}}, {{[[]}}[[REG]]
; CHECK: {{[0-9]+}}c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: add sp
; CHECK-NEXT: bic sp, {{.*}} 0xc0000000
; CHECK-NEXT: pop
; CHECK: {{[0-9]+}}0: {{.*}} bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock align_to_end with one bunch of padding.
define internal void @bundle_lock_align_to_end_padding_1() {
entry:
call void @call_target()
; bundle boundary
store i32 65536, i32* undef, align 1
; bundle boundary
call void @call_target()
; bundle boundary
ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_1
; CHECK: {{[0-9]*}}c: {{.*}} bl {{.*}} call_target
; CHECK-NEXT: movw [[BASE:r[0-9]+]]
; CHECK-NEXT: movw [[REG:r[0-9]+]], #0
; CHECK-NEXT: movt [[REG]], #1
; CHECK-NEXT: nop
; CHECK-NEXT: bic [[BASE]], [[BASE]], {{.*}} 0xc0000000
; CHECK-NEXT: str [[REG]], {{[[]}}[[BASE]]
; CHECK-NEXT: nop
; CHECK-NEXT: bl {{.*}} call_target
; CHECK: {{[0-9]+}}0: {{.*}} bic lr, lr, {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} bx lr
; Bundle lock align_to_end with two bunches of padding.
define internal void @bundle_lock_align_to_end_padding_2(i32 %target) {
entry:
call void @call_target1(i32 1)
; bundle boundary
%__1 = inttoptr i32 %target to void (i32, i32, i32)*
call void %__1(i32 2, i32 3, i32 4)
ret void
}
; CHECK-LABEL: bundle_lock_align_to_end_padding_2
; CHECK: {{[0-9]+}}0:
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: bl {{.*}} call_target
; CHECK: {{[0-9]+}}c: {{.*}} movw r2, #4
; CHECK-NEXT: nop
; CHECK-NEXT: nop
; CHECK-NEXT: bic [[REG:r[0-9]+]], [[REG]], {{.*}} 0xc000000f
; CHECK-NEXT: {{.*}} blx [[REG]]
......@@ -3,9 +3,9 @@
; we try to limit to a few instructions with well known sizes and
; minimal use of registers and stack slots in the lowering sequence.
; RUN: %p2i -i %s --filetype=obj --disassemble --args -Om1 \
; RUN: %p2i -i %s --sandbox --filetype=obj --disassemble --args -Om1 \
; RUN: -allow-externally-defined-symbols \
; RUN: -ffunction-sections -sandbox | FileCheck %s
; RUN: -ffunction-sections | FileCheck %s
declare void @call_target()
@global_byte = internal global [1 x i8] zeroinitializer
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment