Commit 9aedc2e9 by Karl Schimpf

Add vmov between floating point registers to ARM assembler.

Adds generating binary versions of vmov for moves between floating point registers, in the integrated ARM assembler. Also adds simple lit test. Also simplifies the lit test for push/pop (which had to be changed anyway since it included vmov instructions as well). BUG= https://bugs.chromium.org/p/nativeclient/issues/detail?id=4334 R=eholk@chromium.org, jpp@chromium.org Review URL: https://codereview.chromium.org/1645683003 .
parent b58170c5
......@@ -917,16 +917,17 @@ void Assembler::EmitVFPddd(Condition cond, int32_t opcode,
(static_cast<int32_t>(dm) & 0xf);
Emit(encoding);
}
#endif
// Moved to Arm32::AssemblerARM32::vmovss()
// Register-to-register single-precision move: vmov<c>.f32 sd, sm.
void Assembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
  // VMOV (register) opcode bits for the VFP sss encoding.
  const int32_t kVmovsOpcode = B23 | B21 | B20 | B6;
  EmitVFPsss(cond, kVmovsOpcode, sd, S0, sm);
}
// Moved to Arm32::AssemblerARM32::vmovdd()
// Register-to-register double-precision move: vmov<c>.f64 dd, dm.
void Assembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
  // VMOV (register) opcode bits for the VFP ddd encoding.
  const int32_t kVmovdOpcode = B23 | B21 | B20 | B6;
  EmitVFPddd(cond, kVmovdOpcode, dd, D0, dm);
}
#endif
#if 0
// Moved to Arm32::AssemblerARM32::vmovs()
......
......@@ -627,8 +627,12 @@ class Assembler : public ValueObject {
void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
void vmovdr(DRegister dd, int i, Register rt, Condition cond = AL);
#if 0
// Moved to ARM32::AssemblerARM32::vmovss().
void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
// Moved to ARM32::AssemblerARM32::vmovdd().
void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
#endif
void vmovq(QRegister qd, QRegister qm);
#if 0
......
......@@ -2413,6 +2413,20 @@ void AssemblerARM32::vmovd(const Operand *OpDd,
emitVFPddd(Cond, OpcodePlusImm8, Dd, D0, D0);
}
void AssemblerARM32::vmovdd(const Operand *OpDd, const Operand *OpDm,
                            CondARM32::Cond Cond) {
  // Emits a register-to-register double-precision move.
  //
  // VMOV (register) - ARM section A8.8.340, encoding A2:
  //   vmov<c>.f64 <Dd>, <Dm>
  //
  // cccc11101D110000dddd101101M0mmmm where cccc=Cond, Ddddd=Dd, and Mmmmm=Dm.
  constexpr const char *Vmovdd = "Vmovdd";
  // Both operands are double-precision registers, so they must be encoded as
  // D registers: the extra register bit belongs in the D/M fields, which is
  // not the layout encodeSRegister() produces for S registers.
  const IValueT Dd = encodeDRegister(OpDd, "Dd", Vmovdd);
  const IValueT Dm = encodeDRegister(OpDm, "Dm", Vmovdd);
  constexpr IValueT VmovddOpcode = B23 | B21 | B20 | B6;
  constexpr IValueT D0 = 0;
  emitVFPddd(Cond, VmovddOpcode, Dd, D0, Dm);
}
void AssemblerARM32::vmovs(const Operand *OpSd,
const OperandARM32FlexFpImm *OpFpImm,
CondARM32::Cond Cond) {
......@@ -2430,6 +2444,20 @@ void AssemblerARM32::vmovs(const Operand *OpSd,
emitVFPsss(Cond, OpcodePlusImm8, Sd, S0, S0);
}
void AssemblerARM32::vmovss(const Operand *OpSd, const Operand *OpSm,
CondARM32::Cond Cond) {
// VMOV (register) - ARM section A8.8.340, encoding A2:
// vmov<c>.f32 <Sd>, <Sm>
//
// cccc11101D110000dddd101001M0mmmm where cccc=Cond, ddddD=Sd, and mmmmM=Sm.
constexpr const char *Vmovss = "Vmovss";
IValueT Sd = encodeSRegister(OpSd, "Sd", Vmovss);
IValueT Sm = encodeSRegister(OpSm, "Sm", Vmovss);
constexpr IValueT VmovssOpcode = B23 | B21 | B20 | B6;
constexpr IValueT S0 = 0;
emitVFPsss(Cond, VmovssOpcode, Sd, S0, Sm);
}
void AssemblerARM32::vmovsr(const Operand *OpSn, const Operand *OpRt,
CondARM32::Cond Cond) {
// VMOV (between ARM core register and single-precision register)
......
......@@ -385,9 +385,13 @@ public:
void vmovd(const Operand *OpDn, const OperandARM32FlexFpImm *OpFpImm,
           CondARM32::Cond Cond);
// Double-precision register-to-register move: vmov<c>.f64 <Dd>, <Dm>.
void vmovdd(const Operand *OpDd, const Operand *OpDm, CondARM32::Cond Cond);
void vmovs(const Operand *OpSn, const OperandARM32FlexFpImm *OpFpImm,
           CondARM32::Cond Cond);
// Single-precision register-to-register move: vmov<c>.f32 <Sd>, <Sm>.
// Parameter names corrected to OpSd/OpSm to match the definition; these are
// S registers, not D registers.
void vmovss(const Operand *OpSd, const Operand *OpSm, CondARM32::Cond Cond);
void vmovsr(const Operand *OpSn, const Operand *OpRt, CondARM32::Cond Cond);
void vmlad(const Operand *OpDd, const Operand *OpDn, const Operand *OpDm,
......
......@@ -1165,19 +1165,29 @@ void InstARM32Mov::emitIASScalarVFPMove(const Cfg *Func) const {
switch (Dest->getType()) {
default:
assert(false && "Do not know how to emit scalar FP move for type.");
return;
break;
case IceType_f32:
if (const auto *FpImm = llvm::dyn_cast<OperandARM32FlexFpImm>(Src0)) {
if (llvm::isa<Variable>(Src0)) {
Asm->vmovss(Dest, Src0, getPredicate());
return;
} else if (const auto *FpImm =
llvm::dyn_cast<OperandARM32FlexFpImm>(Src0)) {
Asm->vmovs(Dest, FpImm, getPredicate());
return;
}
break;
assert(!Asm->needsTextFixup());
return;
case IceType_f64:
if (const auto *FpImm = llvm::dyn_cast<OperandARM32FlexFpImm>(Src0)) {
if (llvm::isa<Variable>(Src0)) {
Asm->vmovdd(Dest, Src0, getPredicate());
return;
} else if (const auto *FpImm =
llvm::dyn_cast<OperandARM32FlexFpImm>(Src0)) {
Asm->vmovd(Dest, FpImm, getPredicate());
return;
}
break;
assert(!Asm->needsTextFixup());
return;
}
// TODO(kschimpf) Handle register to register move.
Asm->setNeedsTextFixup();
......
; Show that we know how to move between floating point registers.
; NOTE: We use the select instruction to fire this in -Om1, since a
; vmovne is generated (after a branch) to (conditionally) assign the
; else value.
; REQUIRES: allow_dump
; Compile using standalone assembler.
; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -Om1 \
; RUN: -reg-use s20,s22,d20,d22 \
; RUN: | FileCheck %s --check-prefix=ASM
; Show bytes in assembled standalone code.
; RUN: %p2i --filetype=asm -i %s --target=arm32 --assemble --disassemble \
; RUN: --args -Om1 \
; RUN: -reg-use s20,s22,d20,d22 \
; RUN: | FileCheck %s --check-prefix=DIS
; Compile using integrated assembler.
; RUN: %p2i --filetype=iasm -i %s --target=arm32 --args -Om1 \
; RUN: -reg-use s20,s22,d20,d22 \
; RUN: | FileCheck %s --check-prefix=IASM
; Show bytes in assembled integrated code.
; RUN: %p2i --filetype=iasm -i %s --target=arm32 --assemble --disassemble \
; RUN: --args -Om1 \
; RUN: -reg-use s20,s22,d20,d22 \
; RUN: | FileCheck %s --check-prefix=DIS
define internal float @moveFloat() {
; ASM-LABEL: moveFloat:
; DIS-LABEL: 00000000 <moveFloat>:
; IASM-LABEL: moveFloat:
%v = select i1 true, float 0.5, float 1.5
; ASM: vmovne.f32 s20, s22
; DIS: 1c: 1eb0aa4b
; IASM-NOT: vmovne.f32
ret float %v
}
define internal double @moveDouble() {
; ASM-LABEL: moveDouble:
; DIS-LABEL: 00000040 <moveDouble>:
; IASM-LABEL: moveDouble:
%v = select i1 true, double 0.5, double 1.5
; ASM: vmovne.f64 d20, d22
; DIS: 54: 1ef04b66
; IASM-NOT: vmovne.f64
ret double %v
}
......@@ -7,112 +7,38 @@
; REQUIRES: allow_dump
; Compile using standalone assembler.
; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -O2 \
; RUN: %p2i --filetype=asm -i %s --target=arm32 --args -O2 -reg-use=d9,d10 \
; RUN: | FileCheck %s --check-prefix=ASM
; Show bytes in assembled standalone code.
; RUN: %p2i --filetype=asm -i %s --target=arm32 --assemble --disassemble \
; RUN: --args -O2 | FileCheck %s --check-prefix=DIS
; RUN: --args -O2 -reg-use=d9,d10 \
; RUN: | FileCheck %s --check-prefix=DIS
; Compile using integrated assembler.
; RUN: %p2i --filetype=iasm -i %s --target=arm32 --args -O2 \
; RUN: %p2i --filetype=iasm -i %s --target=arm32 --args -O2 -reg-use=d9,d10 \
; RUN: | FileCheck %s --check-prefix=IASM
; Show bytes in assembled integrated code.
; RUN: %p2i --filetype=iasm -i %s --target=arm32 --assemble --disassemble \
; RUN: --args -O2 | FileCheck %s --check-prefix=DIS
; RUN: --args -O2 -reg-use=d9,d10 \
; RUN: | FileCheck %s --check-prefix=DIS
define internal double @testVpushVpop(double %v1, double %v2) {
; ASM-LABEL: testVpushVpop:
; DIS-LABEL: 00000000 <testVpushVpop>:
; IASM-LABEL: testVpushVpop:
entry:
; ASM-NEXT: .LtestVpushVpop$entry:
; IASM-NEXT: .LtestVpushVpop$entry:
; ASM-NEXT: vpush {s16, s17, s18, s19}
; DIS-NEXT: 0: ed2d8a04
; IASM-NEXT: .byte 0x4
; IASM-NEXT: .byte 0x8a
; IASM-NEXT: .byte 0x2d
; IASM-NEXT: .byte 0xed
; ASM-NEXT: push {lr}
; DIS-NEXT: 4: e52de004
; IASM-NEXT: .byte 0x4
; IASM-NEXT: .byte 0xe0
; IASM-NEXT: .byte 0x2d
; IASM-NEXT: .byte 0xe5
; ASM-NEXT: sub sp, sp, #12
; DIS-NEXT: 8: e24dd00c
; IASM-NEXT: .byte 0xc
; IASM-NEXT: .byte 0xd0
; IASM-NEXT: .byte 0x4d
; IASM-NEXT: .byte 0xe2
; ASM-NEXT: vmov.f64 d8, d0
; DIS-NEXT: c: eeb08b40
; IASM-NEXT: vmov.f64 d8, d0
; ASM-NEXT: vmov.f64 d9, d1
; DIS-NEXT: 10: eeb09b41
; IASM-NEXT: vmov.f64 d9, d1
; ASM: vpush {s18, s19, s20, s21}
; DIS: 0: ed2d9a04
; IASM-NOT: vpush
call void @foo()
; ASM-NEXT: bl foo
; DIS-NEXT: 14: ebfffffe
; IASM-NEXT: bl foo @ .word ebfffffe
%res = fadd double %v1, %v2
; ASM-NEXT: vadd.f64 d8, d8, d9
; DIS-NEXT: 18: ee388b09
; IASM-NEXT: .byte 0x9
; IASM-NEXT: .byte 0x8b
; IASM-NEXT: .byte 0x38
; IASM-NEXT: .byte 0xee
; ASM-NEXT: vmov.f64 d0, d8
; DIS-NEXT: 1c: eeb00b48
; IASM-NEXT: vmov.f64 d0, d8
ret double %res
; ASM-NEXT: add sp, sp, #12
; DIS-NEXT: 20: e28dd00c
; IASM-NEXT: .byte 0xc
; IASM-NEXT: .byte 0xd0
; IASM-NEXT: .byte 0x8d
; IASM-NEXT: .byte 0xe2
; ASM-NEXT: pop {lr}
; ASM-NEXT: # lr = def.pseudo
; DIS-NEXT: 24: e49de004
; IASM-NEXT: .byte 0x4
; IASM-NEXT: .byte 0xe0
; IASM-NEXT: .byte 0x9d
; IASM-NEXT: .byte 0xe4
; ASM-NEXT: vpop {s16, s17, s18, s19}
; ASM-NEXT: # s16 = def.pseudo
; ASM-NEXT: # s17 = def.pseudo
; ASM-NEXT: # s18 = def.pseudo
; ASM-NEXT: # s19 = def.pseudo
; DIS-NEXT: 28: ecbd8a04
; IASM-NEXT: .byte 0x4
; IASM-NEXT: .byte 0x8a
; IASM-NEXT: .byte 0xbd
; IASM-NEXT: .byte 0xec
; ASM-NEXT: bx lr
; DIS-NEXT: 2c: e12fff1e
; IASM-NEXT: .byte 0x1e
; IASM-NEXT: .byte 0xff
; IASM-NEXT: .byte 0x2f
; IASM-NEXT: .byte 0xe1
; ASM: vpop {s18, s19, s20, s21}
; DIS: 28: ecbd9a04
; IASM-NOT: vpop
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment