Commit 67574d83 by Karl Schimpf

Fix problems with sandboxing and the ARM integrated assembler.

Fixes (at least) the obvious problems with sandboxing and the integrated assembler. This includes: Added assembly instruction Nop. Fixed implementation of padWithNop. Fixed linking to a local label to only fire when persistent (i.e. the last pass of assembly generation). Removed the restriction on single-register push/pop, since the ARM integrated assembler converts it to a corresponding str/ldr. Fixed OperandARM32FlexImm to use the smallest rotation value, so that external assemblers and the ARM integrated assembler will agree on the encoding. Fixed the ARM sandboxing REQUIRES test in sandboxing.ll BUG=None R=stichnot@chromium.org Review URL: https://codereview.chromium.org/1511653002 .
parent 4c435d32
...@@ -600,13 +600,15 @@ void Assembler::clrex() { ...@@ -600,13 +600,15 @@ void Assembler::clrex() {
Emit(encoding); Emit(encoding);
} }
#if 0
// Moved to ARM32::AssemblerARM32::nop().
void Assembler::nop(Condition cond) { void Assembler::nop(Condition cond) {
ASSERT(cond != kNoCondition); ASSERT(cond != kNoCondition);
int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) | int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
B25 | B24 | B21 | (0xf << 12); B25 | B24 | B21 | (0xf << 12);
Emit(encoding); Emit(encoding);
} }
#endif
void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) { void Assembler::vmovsr(SRegister sn, Register rt, Condition cond) {
......
...@@ -594,9 +594,10 @@ class Assembler : public ValueObject { ...@@ -594,9 +594,10 @@ class Assembler : public ValueObject {
// Miscellaneous instructions. // Miscellaneous instructions.
void clrex(); void clrex();
#if 0
// Moved to ARM32::AssemblerARM32::nop().
void nop(Condition cond = AL); void nop(Condition cond = AL);
#if 0
// Moved to ARM32::AssemblerARM32::bkpt() // Moved to ARM32::AssemblerARM32::bkpt()
// Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0. // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
void bkpt(uint16_t imm16); void bkpt(uint16_t imm16);
......
...@@ -35,6 +35,17 @@ static uintptr_t NewContents(Assembler &Assemblr, intptr_t Capacity) { ...@@ -35,6 +35,17 @@ static uintptr_t NewContents(Assembler &Assemblr, intptr_t Capacity) {
return Result; return Result;
} }
void Label::linkTo(const Assembler &Asm, intptr_t Pos) {
  // Record a branch site for this (still unbound) label. The link is only
  // recorded once the position is absolutely known: it is skipped during the
  // preliminary (sandboxing) pass, and skipped when the instruction still
  // needs a text fixup (hybrid iasm mode).
  const bool PositionIsFinal = !Asm.getPreliminary() && !Asm.needsTextFixup();
  if (!PositionIsFinal)
    return;
  assert(!isBound());
  // Encode the link as the branch position biased by the word size, which
  // isLinked() recognizes as a linked (non-bound, non-unused) state.
  Position = kWordSize + Pos;
  assert(isLinked());
}
void AssemblerBuffer::installFixup(AssemblerFixup *F) { void AssemblerBuffer::installFixup(AssemblerFixup *F) {
if (!Assemblr.getPreliminary()) if (!Assemblr.getPreliminary())
Fixups.push_back(F); Fixups.push_back(F);
......
...@@ -32,6 +32,8 @@ ...@@ -32,6 +32,8 @@
namespace Ice { namespace Ice {
class Assembler;
/// A Label can be in one of three states: /// A Label can be in one of three states:
/// - Unused. /// - Unused.
/// - Linked, unplaced and tracking the position of branches to the label. /// - Linked, unplaced and tracking the position of branches to the label.
...@@ -82,11 +84,7 @@ public: ...@@ -82,11 +84,7 @@ public:
assert(isBound()); assert(isBound());
} }
void linkTo(intptr_t position) { void linkTo(const Assembler &Asm, intptr_t position);
assert(!isBound());
Position = position + kWordSize;
assert(isLinked());
}
protected: protected:
intptr_t Position = 0; intptr_t Position = 0;
......
...@@ -460,6 +460,14 @@ size_t BlRelocatableFixup::emit(GlobalContext *Ctx, ...@@ -460,6 +460,14 @@ size_t BlRelocatableFixup::emit(GlobalContext *Ctx,
return InstARM32::InstSize; return InstARM32::InstSize;
} }
void AssemblerARM32::padWithNop(intptr_t Padding) {
  // Fill Padding bytes of the instruction stream with NOP instructions.
  // Padding must be a whole number of (fixed-width) ARM instructions.
  constexpr intptr_t InstWidth = sizeof(IValueT);
  assert(Padding % InstWidth == 0 &&
         "Padding not multiple of instruction size");
  intptr_t Remaining = Padding;
  while (Remaining > 0) {
    nop();
    Remaining -= InstWidth;
  }
}
BlRelocatableFixup * BlRelocatableFixup *
AssemblerARM32::createBlFixup(const ConstantRelocatable *BlTarget) { AssemblerARM32::createBlFixup(const ConstantRelocatable *BlTarget) {
BlRelocatableFixup *F = BlRelocatableFixup *F =
...@@ -651,8 +659,7 @@ void AssemblerARM32::emitBranch(Label *L, CondARM32::Cond Cond, bool Link) { ...@@ -651,8 +659,7 @@ void AssemblerARM32::emitBranch(Label *L, CondARM32::Cond Cond, bool Link) {
const IOffsetT Position = Buffer.size(); const IOffsetT Position = Buffer.size();
// Use the offset field of the branch instruction for linking the sites. // Use the offset field of the branch instruction for linking the sites.
emitType05(Cond, L->getEncodedPosition(), Link, BranchName); emitType05(Cond, L->getEncodedPosition(), Link, BranchName);
if (!needsTextFixup()) L->linkTo(*this, Position);
L->linkTo(Position);
} }
void AssemblerARM32::emitCompareOp(CondARM32::Cond Cond, IValueT Opcode, void AssemblerARM32::emitCompareOp(CondARM32::Cond Cond, IValueT Opcode,
...@@ -717,8 +724,9 @@ void AssemblerARM32::emitMemOp(CondARM32::Cond Cond, bool IsLoad, bool IsByte, ...@@ -717,8 +724,9 @@ void AssemblerARM32::emitMemOp(CondARM32::Cond Cond, bool IsLoad, bool IsByte,
llvm::report_fatal_error(std::string(InstName) + llvm::report_fatal_error(std::string(InstName) +
": Use push/pop instead"); ": Use push/pop instead");
return emitMemOp(Cond, kInstTypeMemImmediate, IsLoad, IsByte, Rt, Address, emitMemOp(Cond, kInstTypeMemImmediate, IsLoad, IsByte, Rt, Address,
InstName); InstName);
return;
} }
case EncodedAsShiftRotateImm5: { case EncodedAsShiftRotateImm5: {
// XXX{B} (register) // XXX{B} (register)
...@@ -740,8 +748,9 @@ void AssemblerARM32::emitMemOp(CondARM32::Cond Cond, bool IsLoad, bool IsByte, ...@@ -740,8 +748,9 @@ void AssemblerARM32::emitMemOp(CondARM32::Cond Cond, bool IsLoad, bool IsByte,
verifyRegNotPc(Rn, "Rn", InstName); verifyRegNotPc(Rn, "Rn", InstName);
verifyRegsNotEq(Rn, "Rn", Rt, "Rt", InstName); verifyRegsNotEq(Rn, "Rn", Rt, "Rt", InstName);
} }
return emitMemOp(Cond, kInstTypeRegisterShift, IsLoad, IsByte, Rt, Address, emitMemOp(Cond, kInstTypeRegisterShift, IsLoad, IsByte, Rt, Address,
InstName); InstName);
return;
} }
} }
} }
...@@ -773,7 +782,8 @@ void AssemblerARM32::emitMemOpEnc3(CondARM32::Cond Cond, IValueT Opcode, ...@@ -773,7 +782,8 @@ void AssemblerARM32::emitMemOpEnc3(CondARM32::Cond Cond, IValueT Opcode,
const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) |
Opcode | (Rt << kRdShift) | Address; Opcode | (Rt << kRdShift) | Address;
AssemblerBuffer::EnsureCapacity ensured(&Buffer); AssemblerBuffer::EnsureCapacity ensured(&Buffer);
return emitInst(Encoding); emitInst(Encoding);
return;
} }
case EncodedAsShiftRotateImm5: { case EncodedAsShiftRotateImm5: {
// XXXH (register) // XXXH (register)
...@@ -800,7 +810,8 @@ void AssemblerARM32::emitMemOpEnc3(CondARM32::Cond Cond, IValueT Opcode, ...@@ -800,7 +810,8 @@ void AssemblerARM32::emitMemOpEnc3(CondARM32::Cond Cond, IValueT Opcode,
const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) |
Opcode | (Rt << kRdShift) | Address; Opcode | (Rt << kRdShift) | Address;
AssemblerBuffer::EnsureCapacity ensured(&Buffer); AssemblerBuffer::EnsureCapacity ensured(&Buffer);
return emitInst(Encoding); emitInst(Encoding);
return;
} }
} }
} }
...@@ -1130,7 +1141,8 @@ void AssemblerARM32::ldr(const Operand *OpRt, const Operand *OpAddress, ...@@ -1130,7 +1141,8 @@ void AssemblerARM32::ldr(const Operand *OpRt, const Operand *OpAddress,
// cccc011pu1w1nnnnttttiiiiiss0mmmm where cccc=Cond, tttt=Rt, U=1 if +, pu0b // cccc011pu1w1nnnnttttiiiiiss0mmmm where cccc=Cond, tttt=Rt, U=1 if +, pu0b
// is a BlockAddr, and pu0w0nnnn0000iiiiiss0mmmm=Address. // is a BlockAddr, and pu0w0nnnn0000iiiiiss0mmmm=Address.
constexpr bool IsByte = true; constexpr bool IsByte = true;
return emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, LdrName); emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, LdrName);
return;
} }
case 1: { case 1: {
// Handles i16 loads. // Handles i16 loads.
...@@ -1144,7 +1156,8 @@ void AssemblerARM32::ldr(const Operand *OpRt, const Operand *OpAddress, ...@@ -1144,7 +1156,8 @@ void AssemblerARM32::ldr(const Operand *OpRt, const Operand *OpAddress,
// iiiiiiii=Imm8, u=1 if +, pu0w is a BlockAddr, and // iiiiiiii=Imm8, u=1 if +, pu0w is a BlockAddr, and
// pu0w0nnnn0000iiiiiiiiiiii=Address. // pu0w0nnnn0000iiiiiiiiiiii=Address.
constexpr const char *Ldrh = "ldrh"; constexpr const char *Ldrh = "ldrh";
return emitMemOpEnc3(Cond, L | B7 | B5 | B4, Rt, OpAddress, TInfo, Ldrh); emitMemOpEnc3(Cond, L | B7 | B5 | B4, Rt, OpAddress, TInfo, Ldrh);
return;
} }
case 2: { case 2: {
// Note: Handles i32 and float loads. Target lowering handles i64 and // Note: Handles i32 and float loads. Target lowering handles i64 and
...@@ -1165,7 +1178,8 @@ void AssemblerARM32::ldr(const Operand *OpRt, const Operand *OpAddress, ...@@ -1165,7 +1178,8 @@ void AssemblerARM32::ldr(const Operand *OpRt, const Operand *OpAddress,
// cccc011pu0w1nnnnttttiiiiiss0mmmm where cccc=Cond, tttt=Rt, U=1 if +, pu0b // cccc011pu0w1nnnnttttiiiiiss0mmmm where cccc=Cond, tttt=Rt, U=1 if +, pu0b
// is a BlockAddr, and pu0w0nnnn0000iiiiiss0mmmm=Address. // is a BlockAddr, and pu0w0nnnn0000iiiiiss0mmmm=Address.
constexpr bool IsByte = false; constexpr bool IsByte = false;
return emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, LdrName); emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, LdrName);
return;
} }
} }
} }
...@@ -1327,6 +1341,18 @@ void AssemblerARM32::mvn(const Operand *OpRd, const Operand *OpSrc, ...@@ -1327,6 +1341,18 @@ void AssemblerARM32::mvn(const Operand *OpRd, const Operand *OpSrc,
MvnName); MvnName);
} }
void AssemblerARM32::nop() {
  // NOP - Section A8.8.119, encoding A1:
  // nop<c>
  //
  // cccc0011001000001111000000000000 where cccc=Cond.
  AssemblerBuffer::EnsureCapacity ensured(&Buffer);
  // Always emit the unconditional (AL) form.
  constexpr CondARM32::Cond Cond = CondARM32::AL;
  const IValueT NopBits = B25 | B24 | B21 | B15 | B14 | B13 | B12;
  const IValueT Encoding = (encodeCondition(Cond) << kConditionShift) | NopBits;
  emitInst(Encoding);
}
void AssemblerARM32::sbc(const Operand *OpRd, const Operand *OpRn, void AssemblerARM32::sbc(const Operand *OpRd, const Operand *OpRn,
const Operand *OpSrc1, bool SetFlags, const Operand *OpSrc1, bool SetFlags,
CondARM32::Cond Cond) { CondARM32::Cond Cond) {
...@@ -1391,7 +1417,8 @@ void AssemblerARM32::str(const Operand *OpRt, const Operand *OpAddress, ...@@ -1391,7 +1417,8 @@ void AssemblerARM32::str(const Operand *OpRt, const Operand *OpAddress,
// cccc010pu1w0nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn, // cccc010pu1w0nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn,
// iiiiiiiiiiii=imm12, u=1 if +. // iiiiiiiiiiii=imm12, u=1 if +.
constexpr bool IsByte = true; constexpr bool IsByte = true;
return emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, StrName); emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, StrName);
return;
} }
case 1: { case 1: {
// Handles i16 stores. // Handles i16 stores.
...@@ -1405,7 +1432,8 @@ void AssemblerARM32::str(const Operand *OpRt, const Operand *OpAddress, ...@@ -1405,7 +1432,8 @@ void AssemblerARM32::str(const Operand *OpRt, const Operand *OpAddress,
// iiiiiiii=Imm8, u=1 if +, pu0w is a BlockAddr, and // iiiiiiii=Imm8, u=1 if +, pu0w is a BlockAddr, and
// pu0w0nnnn0000iiiiiiiiiiii=Address. // pu0w0nnnn0000iiiiiiiiiiii=Address.
constexpr const char *Strh = "strh"; constexpr const char *Strh = "strh";
return emitMemOpEnc3(Cond, B7 | B5 | B4, Rt, OpAddress, TInfo, Strh); emitMemOpEnc3(Cond, B7 | B5 | B4, Rt, OpAddress, TInfo, Strh);
return;
} }
case 2: { case 2: {
// Note: Handles i32 and float stores. Target lowering handles i64 and // Note: Handles i32 and float stores. Target lowering handles i64 and
...@@ -1419,8 +1447,8 @@ void AssemblerARM32::str(const Operand *OpRt, const Operand *OpAddress, ...@@ -1419,8 +1447,8 @@ void AssemblerARM32::str(const Operand *OpRt, const Operand *OpAddress,
// cccc010pu1w0nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn, // cccc010pu1w0nnnnttttiiiiiiiiiiii where cccc=Cond, tttt=Rt, nnnn=Rn,
// iiiiiiiiiiii=imm12, u=1 if +. // iiiiiiiiiiii=imm12, u=1 if +.
constexpr bool IsByte = false; constexpr bool IsByte = false;
return emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, StrName); emitMemOp(Cond, IsLoad, IsByte, Rt, OpAddress, TInfo, StrName);
return setNeedsTextFixup(); return;
} }
} }
} }
......
...@@ -135,10 +135,7 @@ public: ...@@ -135,10 +135,7 @@ public:
return llvm::ArrayRef<uint8_t>(Padding, 4); return llvm::ArrayRef<uint8_t>(Padding, 4);
} }
void padWithNop(intptr_t Padding) override { void padWithNop(intptr_t Padding) override;
(void)Padding;
llvm_unreachable("Not yet implemented.");
}
Ice::Label *getCfgNodeLabel(SizeT NodeNumber) override { Ice::Label *getCfgNodeLabel(SizeT NodeNumber) override {
assert(NodeNumber < CfgNodeLabels.size()); assert(NodeNumber < CfgNodeLabels.size());
...@@ -244,6 +241,8 @@ public: ...@@ -244,6 +241,8 @@ public:
void mvn(const Operand *OpRd, const Operand *OpScc, CondARM32::Cond Cond); void mvn(const Operand *OpRd, const Operand *OpScc, CondARM32::Cond Cond);
void nop();
void orr(const Operand *OpRd, const Operand *OpRn, const Operand *OpSrc1, void orr(const Operand *OpRd, const Operand *OpRn, const Operand *OpSrc1,
bool SetFlags, CondARM32::Cond Cond); bool SetFlags, CondARM32::Cond Cond);
......
...@@ -101,7 +101,9 @@ public: ...@@ -101,7 +101,9 @@ public:
} }
private: private:
void nearLinkTo(intptr_t position) { void nearLinkTo(const Assembler &Asm, intptr_t position) {
if (Asm.getPreliminary())
return;
assert(!isBound()); assert(!isBound());
UnresolvedNearPositions.push_back(position); UnresolvedNearPositions.push_back(position);
} }
......
...@@ -3404,17 +3404,15 @@ void AssemblerX86Base<Machine>::emitLabelLink(Label *Label) { ...@@ -3404,17 +3404,15 @@ void AssemblerX86Base<Machine>::emitLabelLink(Label *Label) {
assert(!Label->isBound()); assert(!Label->isBound());
intptr_t Position = Buffer.size(); intptr_t Position = Buffer.size();
emitInt32(Label->Position); emitInt32(Label->Position);
if (!getPreliminary()) Label->linkTo(*this, Position);
Label->linkTo(Position);
} }
template <class Machine> template <class Machine>
void AssemblerX86Base<Machine>::emitNearLabelLink(Label *label) { void AssemblerX86Base<Machine>::emitNearLabelLink(Label *Label) {
assert(!label->isBound()); assert(!Label->isBound());
intptr_t position = Buffer.size(); intptr_t Position = Buffer.size();
emitUint8(0); emitUint8(0);
if (!getPreliminary()) Label->nearLinkTo(*this, Position);
label->nearLinkTo(position);
} }
template <class Machine> template <class Machine>
......
...@@ -1286,12 +1286,6 @@ void InstARM32Pop::emitIAS(const Cfg *Func) const { ...@@ -1286,12 +1286,6 @@ void InstARM32Pop::emitIAS(const Cfg *Func) const {
// Note: Can only apply pop register if single register is not sp. // Note: Can only apply pop register if single register is not sp.
assert((RegARM32::Encoded_Reg_sp != LastDest->getRegNum()) && assert((RegARM32::Encoded_Reg_sp != LastDest->getRegNum()) &&
"Effects of pop register SP is undefined!"); "Effects of pop register SP is undefined!");
// TODO(kschimpf) ARM sandbox does not allow the single register form of
// pop, and the popList form expects multiple registers. Convert this
// assert to a conditional check once it has been shown that popList
// works.
assert(!Func->getContext()->getFlags().getUseSandboxing() &&
"pop register not in ARM sandbox!");
Asm->pop(LastDest, CondARM32::AL); Asm->pop(LastDest, CondARM32::AL);
break; break;
default: default:
...@@ -1392,12 +1386,6 @@ void InstARM32Push::emitIAS(const Cfg *Func) const { ...@@ -1392,12 +1386,6 @@ void InstARM32Push::emitIAS(const Cfg *Func) const {
// Note: Can only apply push register if single register is not sp. // Note: Can only apply push register if single register is not sp.
assert((RegARM32::Encoded_Reg_sp != LastSrc->getRegNum()) && assert((RegARM32::Encoded_Reg_sp != LastSrc->getRegNum()) &&
"Effects of push register SP is undefined!"); "Effects of push register SP is undefined!");
// TODO(kschimpf) ARM sandbox does not allow the single register form of
// push, and the pushList form expects multiple registers. Convert this
// assert to a conditional check once it has been shown that pushList
// works.
assert(!Func->getContext()->getFlags().getUseSandboxing() &&
"push register not in ARM sandbox!");
Asm->push(LastSrc, CondARM32::AL); Asm->push(LastSrc, CondARM32::AL);
break; break;
} }
...@@ -1796,6 +1784,21 @@ void OperandARM32ShAmtImm::dump(const Cfg *, Ostream &Str) const { ...@@ -1796,6 +1784,21 @@ void OperandARM32ShAmtImm::dump(const Cfg *, Ostream &Str) const {
ShAmt->dump(Str); ShAmt->dump(Str);
} }
OperandARM32FlexImm *OperandARM32FlexImm::create(Cfg *Func, Type Ty,
                                                 uint32_t Imm,
                                                 uint32_t RotateAmt) {
  // Canonicalize to the smallest rotation amount so that the integrated
  // assembler and external assemblers agree on the encoding. Note: Imm is an
  // 8-bit value.
  assert(Utils::IsUint(8, Imm) &&
         "Flex immediates can only be defined on 8-bit immediates");
  // Each unit of RotateAmt is a rotate-right by two bits, so whenever the low
  // two bits of Imm are clear, the same value encodes with one less rotation.
  for (; RotateAmt > 0 && (Imm & 0x03) == 0; --RotateAmt)
    Imm >>= 2;
  return new (Func->allocate<OperandARM32FlexImm>())
      OperandARM32FlexImm(Func, Ty, Imm, RotateAmt);
}
void OperandARM32FlexImm::emit(const Cfg *Func) const { void OperandARM32FlexImm::emit(const Cfg *Func) const {
if (!BuildDefs::dump()) if (!BuildDefs::dump())
return; return;
......
...@@ -211,10 +211,7 @@ class OperandARM32FlexImm : public OperandARM32Flex { ...@@ -211,10 +211,7 @@ class OperandARM32FlexImm : public OperandARM32Flex {
public: public:
/// Immed_8 rotated by an even number of bits (2 * RotateAmt). /// Immed_8 rotated by an even number of bits (2 * RotateAmt).
static OperandARM32FlexImm *create(Cfg *Func, Type Ty, uint32_t Imm, static OperandARM32FlexImm *create(Cfg *Func, Type Ty, uint32_t Imm,
uint32_t RotateAmt) { uint32_t RotateAmt);
return new (Func->allocate<OperandARM32FlexImm>())
OperandARM32FlexImm(Func, Ty, Imm, RotateAmt);
}
void emit(const Cfg *Func) const override; void emit(const Cfg *Func) const override;
using OperandARM32::dump; using OperandARM32::dump;
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
; instructions with well known sizes and minimal use of registers and stack ; instructions with well known sizes and minimal use of registers and stack
; slots in the lowering sequence. ; slots in the lowering sequence.
; REQUIRES: allow_dump, target_arm32 ; REQUIRES: allow_dump, target_ARM32
; RUN: %p2i -i %s --sandbox --filetype=asm --target=arm32 --assemble \ ; RUN: %p2i -i %s --sandbox --filetype=asm --target=arm32 --assemble \
; RUN: --disassemble --args -Om1 -allow-externally-defined-symbols \ ; RUN: --disassemble --args -Om1 -allow-externally-defined-symbols \
; RUN: -ffunction-sections | FileCheck %s ; RUN: -ffunction-sections | FileCheck %s
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment