Commit 33246427 by Jim Stichnoth

Subzero: Fix new issues after the LLVM 3.5 merge.

We need to link with -lpthread now. The CALLTARGETS workaround in our lit tests can be removed, since llvm-objdump has become more accurate than before with respect to symbols. The -stats and -rng-seed options need to be renamed to avoid conflicting with the LLVM options being brought in.

BUG= https://code.google.com/p/nativeclient/issues/detail?id=3930
R=jvoung@chromium.org

Review URL: https://codereview.chromium.org/756543002
parent 79f2a03b
......@@ -74,7 +74,8 @@ $(info -----------------------------------------------)
LLVM_CXXFLAGS := `$(LLVM_BIN_PATH)/llvm-config --cxxflags`
LLVM_LDFLAGS := `$(LLVM_BIN_PATH)/llvm-config --libs` \
`$(LLVM_BIN_PATH)/llvm-config --ldflags`
`$(LLVM_BIN_PATH)/llvm-config --ldflags` \
`$(LLVM_BIN_PATH)/llvm-config --system-libs`
# It's recommended that CXX matches the compiler you used to build LLVM itself.
CCACHE := `command -v ccache`
......@@ -133,7 +134,7 @@ make_symlink: $(OBJDIR)/llvm2ice
# TODO(kschimpf): Fix python scripts to directly get build attributes
# rather than generating $(OBJDIR)/llvm2ice.build_atts.
$(OBJDIR)/llvm2ice: $(OBJS)
$(CXX) $(LDFLAGS) -o $@ $^ $(LLVM_LDFLAGS) -ldl \
$(CXX) $(LDFLAGS) -o $@ $^ $(LLVM_LDFLAGS) \
-Wl,-rpath=$(abspath $(LIBCXX_INSTALL_PATH)/lib)
# TODO: Be more precise than "*.h" here and elsewhere.
......
......@@ -22,8 +22,11 @@ namespace Ice {
namespace {
namespace cl = llvm::cl;
// TODO(stichnot): See if we can easily use LLVM's -rng-seed option
// and implementation. I expect the implementation is different and
// therefore the tests would need to be changed.
cl::opt<unsigned long long>
RandomSeed("rng-seed", cl::desc("Seed the random number generator"),
RandomSeed("sz-seed", cl::desc("Seed the random number generator"),
cl::init(time(0)));
const unsigned MAX = 2147483647;
......
......@@ -129,7 +129,7 @@ static cl::opt<bool> DecorateAsm(
cl::desc("Decorate textual asm output with register liveness info"));
static cl::opt<bool>
DumpStats("stats",
DumpStats("szstats",
cl::desc("Print statistics after translating each function"));
// This is currently needed by crosstest.py.
......
......@@ -60,7 +60,7 @@ entry:
ret i32 %result
}
; CHECK-LABEL: testXor16Imm8Neg
; CHECK: 66 83 f0 80 xor ax, 128
; CHECK: 66 83 f0 80 xor ax, -128
define internal i32 @testXor16Imm16Eax(i32 %arg) {
entry:
......@@ -193,7 +193,7 @@ entry:
ret i32 %result
}
; CHECK-LABEL: testMul16Imm8Neg
; CHECK: 66 6b c0 91 imul ax, ax, 145
; CHECK: 66 6b c0 91 imul ax, ax, -111
; CHECK-NEXT: add ax, 1
define internal i32 @testMul16Imm16(i32 %arg) {
......
......@@ -2,12 +2,6 @@
; particular the patterns for lowering i64 operations into constituent
; i32 operations on x86-32.
; TODO(jvoung): fix extra "CALLTARGETS" run. The llvm-objdump symbolizer
; doesn't know how to symbolize non-section-local functions.
; The newer LLVM 3.6 one does work, but watch out for other bugs.
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | FileCheck --check-prefix=CALLTARGETS %s
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
......@@ -36,31 +30,27 @@ entry:
ret i32 %add3
}
; CHECK-LABEL: pass64BitArg
; CALLTARGETS-LABEL: pass64BitArg
; CHECK: sub esp
; CHECK: mov dword ptr [esp + 4]
; CHECK: mov dword ptr [esp]
; CHECK: mov dword ptr [esp + 8], 123
; CHECK: mov dword ptr [esp + 16]
; CHECK: mov dword ptr [esp + 12]
; CHECK: call -4
; CALLTARGETS: .long ignore64BitArgNoInline
; CHECK: call ignore64BitArgNoInline
; CHECK: sub esp
; CHECK: mov dword ptr [esp + 4]
; CHECK: mov dword ptr [esp]
; CHECK: mov dword ptr [esp + 8], 123
; CHECK: mov dword ptr [esp + 16]
; CHECK: mov dword ptr [esp + 12]
; CHECK: call -4
; CALLTARGETS: .long ignore64BitArgNoInline
; CHECK: call ignore64BitArgNoInline
; CHECK: sub esp
; CHECK: mov dword ptr [esp + 4]
; CHECK: mov dword ptr [esp]
; CHECK: mov dword ptr [esp + 8], 123
; CHECK: mov dword ptr [esp + 16]
; CHECK: mov dword ptr [esp + 12]
; CHECK: call -4
; CALLTARGETS: .long ignore64BitArgNoInline
; CHECK: call ignore64BitArgNoInline
;
; OPTM1-LABEL: pass64BitArg
; OPTM1: sub esp
......@@ -69,21 +59,21 @@ entry:
; OPTM1: mov dword ptr [esp + 8], 123
; OPTM1: mov dword ptr [esp + 16]
; OPTM1: mov dword ptr [esp + 12]
; OPTM1: call -4
; OPTM1: call ignore64BitArgNoInline
; OPTM1: sub esp
; OPTM1: mov dword ptr [esp + 4]
; OPTM1: mov dword ptr [esp]
; OPTM1: mov dword ptr [esp + 8], 123
; OPTM1: mov dword ptr [esp + 16]
; OPTM1: mov dword ptr [esp + 12]
; OPTM1: call -4
; OPTM1: call ignore64BitArgNoInline
; OPTM1: sub esp
; OPTM1: mov dword ptr [esp + 4]
; OPTM1: mov dword ptr [esp]
; OPTM1: mov dword ptr [esp + 8], 123
; OPTM1: mov dword ptr [esp + 16]
; OPTM1: mov dword ptr [esp + 12]
; OPTM1: call -4
; OPTM1: call ignore64BitArgNoInline
declare i32 @ignore64BitArgNoInline(i64, i32, i64)
......@@ -93,7 +83,6 @@ entry:
ret i32 %call
}
; CHECK-LABEL: pass64BitConstArg
; CALLTARGETS-LABEL: pass64BitConstArg
; CHECK: sub esp
; CHECK: mov dword ptr [esp + 4]
; CHECK-NEXT: mov dword ptr [esp]
......@@ -103,8 +92,7 @@ entry:
; CHECK-NEXT: mov dword ptr [esp + 12], 305419896
; Bundle padding will push the call down.
; CHECK-NOT: mov
; CHECK: call -4
; CALLTARGETS: .long ignore64BitArgNoInline
; CHECK: call ignore64BitArgNoInline
;
; OPTM1-LABEL: pass64BitConstArg
; OPTM1: sub esp
......@@ -115,7 +103,7 @@ entry:
; OPTM1: mov dword ptr [esp + 16], 3735928559
; OPTM1-NEXT: mov dword ptr [esp + 12], 305419896
; OPTM1-NOT: mov
; OPTM1: call -4
; OPTM1: call ignore64BitArgNoInline
define internal i64 @return64BitArg(i64 %a) {
entry:
......@@ -237,12 +225,10 @@ entry:
ret i64 %div
}
; CHECK-LABEL: div64BitSigned
; CALLTARGETS-LABEL: div64BitSigned
; CHECK: call -4
; CALLTARGETS: .long __divdi3
; CHECK: call __divdi3
; OPTM1-LABEL: div64BitSigned
; OPTM1: call -4
; OPTM1: call __divdi3
define internal i64 @div64BitSignedConst(i64 %a) {
entry:
......@@ -250,16 +236,14 @@ entry:
ret i64 %div
}
; CHECK-LABEL: div64BitSignedConst
; CALLTARGETS-LABEL: div64BitSignedConst
; CHECK: mov dword ptr [esp + 12], 2874
; CHECK: mov dword ptr [esp + 8], 1942892530
; CHECK: call -4
; CALLTARGETS: .long __divdi3
; CHECK: call __divdi3
;
; OPTM1-LABEL: div64BitSignedConst
; OPTM1: mov dword ptr [esp + 12], 2874
; OPTM1: mov dword ptr [esp + 8], 1942892530
; OPTM1: call -4
; OPTM1: call __divdi3
define internal i64 @div64BitUnsigned(i64 %a, i64 %b) {
entry:
......@@ -267,12 +251,10 @@ entry:
ret i64 %div
}
; CHECK-LABEL: div64BitUnsigned
; CALLTARGETS-LABEL: div64BitUnsigned
; CHECK: call -4
; CALLTARGETS: .long __udivdi3
; CHECK: call __udivdi3
;
; OPTM1-LABEL: div64BitUnsigned
; OPTM1: call -4
; OPTM1: call __udivdi3
define internal i64 @rem64BitSigned(i64 %a, i64 %b) {
entry:
......@@ -280,12 +262,10 @@ entry:
ret i64 %rem
}
; CHECK-LABEL: rem64BitSigned
; CALLTARGETS-LABEL: rem64BitSigned
; CHECK: call -4
; CALLTARGETS: .long __moddi3
; CHECK: call __moddi3
;
; OPTM1-LABEL: rem64BitSigned
; OPTM1: call -4
; OPTM1: call __moddi3
define internal i64 @rem64BitUnsigned(i64 %a, i64 %b) {
entry:
......@@ -293,12 +273,10 @@ entry:
ret i64 %rem
}
; CHECK-LABEL: rem64BitUnsigned
; CALLTARGETS-LABEL: rem64BitUnsigned
; CHECK: call -4
; CALLTARGETS: .long __umoddi3
; CHECK: call __umoddi3
;
; OPTM1-LABEL: rem64BitUnsigned
; OPTM1: call -4
; OPTM1: call __umoddi3
define internal i64 @shl64BitSigned(i64 %a, i64 %b) {
entry:
......
......@@ -42,10 +42,9 @@ entry:
; CHECK: mov dword ptr [
; CHECK: movsx
; CHECK: sar {{.*}}, 31
; This appears to be a bug in llvm-mc. It should be [8] and [12] to represent
; i64v and i64+4.
; CHECK-DAG: [8]
; CHECK-DAG: [8]
; This appears to be a bug in llvm-mc. It should be i64v and i64+4.
; CHECK-DAG: [.bss]
; CHECK-DAG: [.bss]
define void @from_int16() {
entry:
......@@ -64,12 +63,12 @@ entry:
}
; CHECK-LABEL: from_int16
; CHECK: mov {{.*}}, word ptr [
; CHECK: [0]
; CHECK: [.bss]
; CHECK: movsx e{{.*}}, {{.*x|[ds]i|bp|word ptr}}
; CHECK: [4]
; CHECK: [.bss]
; CHECK: movsx e{{.*}}, {{.*x|[ds]i|bp|word ptr}}
; CHECK: sar {{.*}}, 31
; CHECK: [8]
; CHECK: [.bss]
define void @from_int32() {
entry:
......@@ -87,11 +86,11 @@ entry:
ret void
}
; CHECK-LABEL: from_int32
; CHECK: [4]
; CHECK: [0]
; CHECK: [2]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: sar {{.*}}, 31
; CHECK: [8]
; CHECK: [.bss]
define void @from_int64() {
entry:
......@@ -109,10 +108,10 @@ entry:
ret void
}
; CHECK-LABEL: from_int64
; CHECK: [8]
; CHECK: [0]
; CHECK: [2]
; CHECK: [4]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: [.bss]
define void @from_uint8() {
......@@ -131,14 +130,14 @@ entry:
ret void
}
; CHECK-LABEL: from_uint8
; CHECK: [16]
; CHECK: [.bss]
; CHECK: movzx e{{.*}}, {{[a-d]l|byte ptr}}
; CHECK: [2]
; CHECK: [.bss]
; CHECK: movzx
; CHECK: [4]
; CHECK: [.bss]
; CHECK: movzx
; CHECK: mov {{.*}}, 0
; CHECK: [8]
; CHECK: [.bss]
define void @from_uint16() {
entry:
......@@ -156,13 +155,13 @@ entry:
ret void
}
; CHECK-LABEL: from_uint16
; CHECK: [18]
; CHECK: [0]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: movzx e{{.*}}, {{.*x|[ds]i|bp|word ptr}}
; CHECK: [4]
; CHECK: [.bss]
; CHECK: movzx e{{.*}}, {{.*x|[ds]i|bp|word ptr}}
; CHECK: mov {{.*}}, 0
; CHECK: [8]
; CHECK: [.bss]
define void @from_uint32() {
entry:
......@@ -180,11 +179,11 @@ entry:
ret void
}
; CHECK-LABEL: from_uint32
; CHECK: [20]
; CHECK: [0]
; CHECK: [2]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: mov {{.*}}, 0
; CHECK: [8]
; CHECK: [.bss]
define void @from_uint64() {
entry:
......@@ -202,7 +201,7 @@ entry:
ret void
}
; CHECK-LABEL: from_uint64
; CHECK: [24]
; CHECK: [0]
; CHECK: [2]
; CHECK: [4]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: [.bss]
; CHECK: [.bss]
......@@ -44,4 +44,4 @@ entry:
; CHECK: mov dword ptr [esp + 4], eax
; CHECK: mov eax, dword ptr [ebp - 16]
; CHECK: mov dword ptr [esp + 8], eax
; CHECK: call -4
; CHECK: call memcpy_helper2
......@@ -3,12 +3,6 @@
; that should be present regardless of the optimization level, so
; there are no special OPTM1 match lines.
; TODO(jvoung): fix extra "CALLTARGETS" run. The llvm-objdump symbolizer
; doesn't know how to symbolize non-section-local functions.
; The newer LLVM 3.6 one does work, but watch out for other bugs.
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | FileCheck --check-prefix=CALLTARGETS %s
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
......@@ -47,16 +41,12 @@ entry:
ret i32 %add3
}
; CHECK-LABEL: passFpArgs
; CALLTARGETS-LABEL: passFpArgs
; CHECK: mov dword ptr [esp + 4], 123
; CHECK: call -4
; CALLTARGETS: .long ignoreFpArgsNoInline
; CHECK: call ignoreFpArgsNoInline
; CHECK: mov dword ptr [esp + 4], 123
; CHECK: call -4
; CALLTARGETS: .long ignoreFpArgsNoInline
; CHECK: call ignoreFpArgsNoInline
; CHECK: mov dword ptr [esp + 4], 123
; CHECK: call -4
; CALLTARGETS: .long ignoreFpArgsNoInline
; CHECK: call ignoreFpArgsNoInline
declare i32 @ignoreFpArgsNoInline(float %x, i32 %y, double %z)
......@@ -66,10 +56,8 @@ entry:
ret i32 %call
}
; CHECK-LABEL: passFpConstArg
; CALLTARGETS-LABEL: passFpConstArg
; CHECK: mov dword ptr [esp + 4], 123
; CHECK: call -4
; CALLTARGETS: .long ignoreFpArgsNoInline
; CHECK: call ignoreFpArgsNoInline
define internal i32 @passFp32ConstArg(float %a) {
entry:
......@@ -192,9 +180,7 @@ entry:
ret float %div
}
; CHECK-LABEL: remFloat
; CALLTARGETS-LABEL: remFloat
; CHECK: call -4
; CALLTARGETS: .long fmodf
; CHECK: call fmodf
define internal double @remDouble(double %a, double %b) {
entry:
......@@ -202,9 +188,7 @@ entry:
ret double %div
}
; CHECK-LABEL: remDouble
; CALLTARGETS-LABEL: remDouble
; CHECK: call -4
; CALLTARGETS: .long fmod
; CHECK: call fmod
define internal float @fptrunc(double %a) {
entry:
......@@ -230,9 +214,7 @@ entry:
ret i64 %conv
}
; CHECK-LABEL: doubleToSigned64
; CALLTARGETS-LABEL: doubleToSigned64
; CHECK: call -4
; CALLTARGETS: .long cvtdtosi64
; CHECK: call cvtdtosi64
define internal i64 @floatToSigned64(float %a) {
entry:
......@@ -240,9 +222,7 @@ entry:
ret i64 %conv
}
; CHECK-LABEL: floatToSigned64
; CALLTARGETS-LABEL: floatToSigned64
; CHECK: call -4
; CALLTARGETS: .long cvtftosi64
; CHECK: call cvtftosi64
define internal i64 @doubleToUnsigned64(double %a) {
entry:
......@@ -250,9 +230,7 @@ entry:
ret i64 %conv
}
; CHECK-LABEL: doubleToUnsigned64
; CALLTARGETS-LABEL: doubleToUnsigned64
; CHECK: call -4
; CALLTARGETS: .long cvtdtoui64
; CHECK: call cvtdtoui64
define internal i64 @floatToUnsigned64(float %a) {
entry:
......@@ -260,9 +238,7 @@ entry:
ret i64 %conv
}
; CHECK-LABEL: floatToUnsigned64
; CALLTARGETS-LABEL: floatToUnsigned64
; CHECK: call -4
; CALLTARGETS: .long cvtftoui64
; CHECK: call cvtftoui64
define internal i32 @doubleToSigned32(double %a) {
entry:
......@@ -294,9 +270,7 @@ entry:
ret i32 %conv
}
; CHECK-LABEL: doubleToUnsigned32
; CALLTARGETS-LABEL: doubleToUnsigned32
; CHECK: call -4
; CALLTARGETS: .long cvtdtoui32
; CHECK: call cvtdtoui32
define internal i32 @floatToUnsigned32(float %a) {
entry:
......@@ -304,9 +278,7 @@ entry:
ret i32 %conv
}
; CHECK-LABEL: floatToUnsigned32
; CALLTARGETS-LABEL: floatToUnsigned32
; CHECK: call -4
; CALLTARGETS: .long cvtftoui32
; CHECK: call cvtftoui32
define internal i32 @doubleToSigned16(double %a) {
......@@ -415,9 +387,7 @@ entry:
ret double %conv
}
; CHECK-LABEL: signed64ToDouble
; CALLTARGETS-LABEL: signed64ToDouble
; CHECK: call -4
; CALLTARGETS: .long cvtsi64tod
; CHECK: call cvtsi64tod
; CHECK: fstp qword
define internal float @signed64ToFloat(i64 %a) {
......@@ -426,9 +396,7 @@ entry:
ret float %conv
}
; CHECK-LABEL: signed64ToFloat
; CALLTARGETS-LABEL: signed64ToFloat
; CHECK: call -4
; CALLTARGETS: .long cvtsi64tof
; CHECK: call cvtsi64tof
; CHECK: fstp dword
define internal double @unsigned64ToDouble(i64 %a) {
......@@ -437,9 +405,7 @@ entry:
ret double %conv
}
; CHECK-LABEL: unsigned64ToDouble
; CALLTARGETS-LABEL: unsigned64ToDouble
; CHECK: call -4
; CALLTARGETS: .long cvtui64tod
; CHECK: call cvtui64tod
; CHECK: fstp
define internal float @unsigned64ToFloat(i64 %a) {
......@@ -448,9 +414,7 @@ entry:
ret float %conv
}
; CHECK-LABEL: unsigned64ToFloat
; CALLTARGETS-LABEL: unsigned64ToFloat
; CHECK: call -4
; CALLTARGETS: .long cvtui64tof
; CHECK: call cvtui64tof
; CHECK: fstp
define internal double @unsigned64ToDoubleConst() {
......@@ -459,11 +423,9 @@ entry:
ret double %conv
}
; CHECK-LABEL: unsigned64ToDouble
; CALLTARGETS-LABEL: unsigned64ToDouble
; CHECK: mov dword ptr [esp + 4], 2874
; CHECK: mov dword ptr [esp], 1942892530
; CHECK: call -4
; CALLTARGETS: .long cvtui64tod
; CHECK: call cvtui64tod
; CHECK: fstp
define internal double @signed32ToDouble(i32 %a) {
......@@ -499,9 +461,7 @@ entry:
ret double %conv
}
; CHECK-LABEL: unsigned32ToDouble
; CALLTARGETS-LABEL: unsigned32ToDouble
; CHECK: call -4
; CALLTARGETS: .long cvtui32tod
; CHECK: call cvtui32tod
; CHECK: fstp qword
define internal float @unsigned32ToFloat(i32 %a) {
......@@ -510,9 +470,7 @@ entry:
ret float %conv
}
; CHECK-LABEL: unsigned32ToFloat
; CALLTARGETS-LABEL: unsigned32ToFloat
; CHECK: call -4
; CALLTARGETS: .long cvtui32tof
; CHECK: call cvtui32tof
; CHECK: fstp dword
define internal double @signed16ToDouble(i32 %a) {
......
......@@ -118,37 +118,37 @@ entry:
; SYMTAB-LABEL: SYMBOL TABLE
; SYMTAB-DAG: 00000000 {{.*}} .data {{.*}} PrimitiveInit
; IAS: mov {{.*}}, 0
; IAS: mov {{.*}}, .data
; IAS-NEXT: R_386_32
; IAS: call
; SYMTAB-DAG: 00000000 {{.*}} .rodata {{.*}} PrimitiveInitConst
; IAS: mov {{.*}}, 0
; IAS: mov {{.*}}, .rodata
; IAS-NEXT: R_386_32
; IAS: call
; SYMTAB-DAG: 00000000 {{.*}} .bss {{.*}} PrimitiveInitStatic
; IAS: mov {{.*}}, 0
; IAS: mov {{.*}}, .bss
; IAS-NEXT: R_386_32
; IAS: call
; SYMTAB-DAG: 00000004 {{.*}} .bss {{.*}} PrimitiveUninit
; IAS: mov {{.*}}, 4
; IAS: mov {{.*}}, .bss
; IAS-NEXT: R_386_32
; IAS: call
; SYMTAB-DAG: 00000004{{.*}}.data{{.*}}ArrayInit
; IAS: mov {{.*}}, 4
; IAS: mov {{.*}}, .data
; IAS-NEXT: R_386_32
; IAS: call
; SYMTAB-DAG: 00000018 {{.*}} .data {{.*}} ArrayInitPartial
; IAS: mov {{.*}}, 24
; IAS: mov {{.*}}, .data
; IAS-NEXT: R_386_32
; IAS: call
; SYMTAB-DAG: 00000008 {{.*}} .bss {{.*}} ArrayUninit
; IAS: mov {{.*}}, 8
; IAS: mov {{.*}}, .bss
; IAS-NEXT: R_386_32
; IAS: call
......
......@@ -49,14 +49,14 @@ entry:
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
; The load + add are optimized into one everywhere.
; CHECK: add {{.*}}, dword ptr [0]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
; CHECK: add {{.*}}, dword ptr [4]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: add {{.*}}, dword ptr [8]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mfence
; CHECK: mov dword ptr
......@@ -91,17 +91,17 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
; CHECK: add {{.*}}, dword ptr [0]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
; CHECK: add {{.*}}, dword ptr [4]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
; Load + add can still be optimized into one instruction
; because it is not separated by a fence.
; CHECK: add {{.*}}, dword ptr [8]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
......@@ -135,19 +135,19 @@ entry:
; CHECK: mov {{.*}}, esp
; CHECK: mov dword ptr {{.*}}, 999
; atomic store (w/ its own mfence)
; CHECK: add {{.*}}, dword ptr [0]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
; CHECK: mfence
; This load + add are no longer optimized into one,
; though perhaps it should be legal as long as
; the load stays on the same side of the fence.
; CHECK: mov {{.*}}, dword ptr [4]
; CHECK: mov {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mfence
; CHECK: add {{.*}}, 1
; CHECK: mov dword ptr
; CHECK: add {{.*}}, dword ptr [8]
; CHECK: add {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: mov dword ptr
......@@ -187,7 +187,7 @@ entry:
ret i32 %b1234
}
; CHECK-LABEL: could_have_fused_loads
; CHECK: mov {{.*}}, byte ptr [12]
; CHECK: mov {{.*}}, byte ptr
; CHECK-NEXT: R_386_32
; CHECK: mov {{.*}}, byte ptr
; CHECK: mov {{.*}}, byte ptr
......@@ -212,10 +212,10 @@ branch2:
}
; CHECK-LABEL: could_have_hoisted_loads
; CHECK: jne {{.*}}
; CHECK: mov {{.*}}, dword ptr [12]
; CHECK: mov {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: ret
; CHECK: mfence
; CHECK: mov {{.*}}, dword ptr [12]
; CHECK: mov {{.*}}, dword ptr [.bss]
; CHECK-NEXT: R_386_32
; CHECK: ret
; This tests the NaCl intrinsics not related to atomic operations.
; TODO(jvoung): fix extra "CALLTARGETS" run. The llvm-objdump symbolizer
; doesn't know how to symbolize non-section-local functions.
; The newer LLVM 3.6 one does work, but watch out for other bugs.
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | FileCheck --check-prefix=CALLTARGETS %s
; RUN: %p2i -i %s --args -O2 --verbose none -sandbox \
; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
......@@ -61,9 +55,7 @@ entry:
; CHECKO2REM-LABEL: test_nacl_read_tp
; CHECKO2REM: mov e{{.*}}, dword ptr gs:[0]
; CHECKO2UNSANDBOXEDREM-LABEL: test_nacl_read_tp
; CHECKO2UNSANDBOXEDREM: call -4
; CALLTARGETS-LABEL: test_nacl_read_tp
; CALLTARGETS: .long __nacl_read_tp
; CHECKO2UNSANDBOXEDREM: call __nacl_read_tp
define i32 @test_nacl_read_tp_more_addressing() {
entry:
......@@ -89,11 +81,8 @@ entry:
; CHECKO2REM: mov e{{.*}}, dword ptr gs:[0]
; CHECKO2REM: mov e{{.*}}, dword ptr gs:[0]
; CHECKO2UNSANDBOXEDREM-LABEL: test_nacl_read_tp_more_addressing
; CHECKO2UNSANDBOXEDREM: call -4
; CHECKO2UNSANDBOXEDREM: call -4
; CALLTARGETS-LABEL: test_nacl_read_tp_more_addressing
; CALLTARGETS: .long __nacl_read_tp
; CALLTARGETS: .long __nacl_read_tp
; CHECKO2UNSANDBOXEDREM: call __nacl_read_tp
; CHECKO2UNSANDBOXEDREM: call __nacl_read_tp
define i32 @test_nacl_read_tp_dead(i32 %a) {
entry:
......@@ -106,9 +95,7 @@ entry:
; CHECKO2REM-LABEL: test_nacl_read_tp_dead
; CHECKO2REM-NOT: mov e{{.*}}, dword ptr gs:[0]
; CHECKO2UNSANDBOXEDREM-LABEL: test_nacl_read_tp_dead
; CHECKO2UNSANDBOXEDREM-NOT: call -4
; CALLTARGETS-LABEL: test_nacl_read_tp_dead
; CALLTARGETS-NOT: call __nacl_read_tp
; CHECKO2UNSANDBOXEDREM-NOT: call __nacl_read_tp
define void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
......@@ -119,9 +106,7 @@ entry:
ret void
}
; CHECK-LABEL: test_memcpy
; CHECK: call -4
; CALLTARGETS-LABEL: test_memcpy
; CALLTARGETS: .long memcpy
; CHECK: call memcpy
; CHECKO2REM-LABEL: test_memcpy
; CHECKO2UNSANDBOXEDREM-LABEL: test_memcpy
......@@ -136,9 +121,7 @@ entry:
ret void
}
; CHECK-LABEL: test_memcpy_const_len_align
; CHECK: call -4
; CALLTARGETS-LABEL: test_memcpy_const_len_align
; CALLTARGETS: .long memcpy
; CHECK: call memcpy
define void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
......@@ -149,9 +132,7 @@ entry:
ret void
}
; CHECK-LABEL: test_memmove
; CHECK: call -4
; CALLTARGETS-LABEL: test_memmove
; CALLTARGETS: .long memmove
; CHECK: call memmove
define void @test_memmove_const_len_align(i32 %iptr_dst, i32 %iptr_src) {
entry:
......@@ -162,9 +143,7 @@ entry:
ret void
}
; CHECK-LABEL: test_memmove_const_len_align
; CHECK: call -4
; CALLTARGETS-LABEL: test_memmove_const_len_align
; CALLTARGETS: .long memmove
; CHECK: call memmove
define void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
entry:
......@@ -176,9 +155,7 @@ entry:
}
; CHECK-LABEL: test_memset
; CHECK: movzx
; CHECK: call -4
; CALLTARGETS-LABEL: test_memset
; CALLTARGETS: .long memset
; CHECK: call memset
define void @test_memset_const_len_align(i32 %iptr_dst, i32 %wide_val) {
entry:
......@@ -190,9 +167,7 @@ entry:
}
; CHECK-LABEL: test_memset_const_len_align
; CHECK: movzx
; CHECK: call -4
; CALLTARGETS-LABEL: test_memset_const_len_align
; CALLTARGETS: .long memset
; CHECK: call memset
define void @test_memset_const_val(i32 %iptr_dst, i32 %len) {
entry:
......@@ -203,9 +178,7 @@ entry:
; CHECK-LABEL: test_memset_const_val
; Make sure the argument is legalized (can't movzx reg, 0).
; CHECK: movzx {{.*}}, {{[^0]}}
; CHECK: call -4
; CALLTARGETS-LABEL: test_memset_const_val
; CALLTARGETS: .long memset
; CHECK: call memset
define i32 @test_setjmplongjmp(i32 %iptr_env) {
......@@ -223,14 +196,11 @@ NonZero:
ret i32 1
}
; CHECK-LABEL: test_setjmplongjmp
; CHECK: call -4
; CHECK: call -4
; CHECK: call setjmp
; CHECK: call longjmp
; CHECKO2REM-LABEL: test_setjmplongjmp
; CHECKO2REM: call -4
; CHECKO2REM: call -4
; CALLTARGETS-LABEL: test_setjmplongjmp
; CALLTARGETS: .long setjmp
; CALLTARGETS: .long longjmp
; CHECKO2REM: call setjmp
; CHECKO2REM: call longjmp
define i32 @test_setjmp_unused(i32 %iptr_env, i32 %i_other) {
entry:
......@@ -241,9 +211,7 @@ entry:
; Don't consider setjmp side-effect free, so it's not eliminated if
; result unused.
; CHECKO2REM-LABEL: test_setjmp_unused
; CHECKO2REM: call -4
; CALLTARGETS-LABEL: test_setjmp_unused
; CALLTARGETS: .long setjmp
; CHECKO2REM: call setjmp
define float @test_sqrt_float(float %x, i32 %iptr) {
entry:
......@@ -449,9 +417,7 @@ entry:
ret i32 %r
}
; CHECK-LABEL: test_popcount_32
; CHECK: call -4
; CALLTARGETS-LABEL: test_popcount_32
; CALLTARGETS: .long __popcountsi2
; CHECK: call __popcountsi2
define i64 @test_popcount_64(i64 %x) {
entry:
......@@ -459,12 +425,10 @@ entry:
ret i64 %r
}
; CHECK-LABEL: test_popcount_64
; CHECK: call -4
; CHECK: call __popcountdi2
; __popcountdi2 only returns a 32-bit result, so clear the upper bits of
; the return value just in case.
; CHECK: mov {{.*}}, 0
; CALLTARGETS-LABEL: test_popcount_64
; CALLTARGETS: .long __popcountdi2
define i32 @test_popcount_64_ret_i32(i64 %x) {
......@@ -475,10 +439,8 @@ entry:
}
; If there is a trunc, then the mov {{.*}}, 0 is dead and gets optimized out.
; CHECKO2REM-LABEL: test_popcount_64_ret_i32
; CHECKO2REM: call -4
; CHECKO2REM: call __popcountdi2
; CHECKO2REM-NOT: mov {{.*}}, 0
; CALLTARGETS-LABEL: test_popcount_64_ret_i32
; CALLTARGETS: .long __popcountdi2
define void @test_stacksave_noalloca() {
entry:
......
......@@ -4,13 +4,13 @@
; Don't use integrated-as because this currently depends on the # variant
; assembler comment.
; RUN: %p2i -i %s -a -rng-seed=1 -nop-insertion -nop-insertion-percentage=50 \
; RUN: %p2i -i %s -a -sz-seed=1 -nop-insertion -nop-insertion-percentage=50 \
; RUN: -max-nops-per-instruction=1 -integrated-as=false \
; RUN: | FileCheck %s --check-prefix=PROB50
; RUN: %p2i -i %s -a -rng-seed=1 -nop-insertion -nop-insertion-percentage=90 \
; RUN: %p2i -i %s -a -sz-seed=1 -nop-insertion -nop-insertion-percentage=90 \
; RUN: -max-nops-per-instruction=1 -integrated-as=false \
; RUN: | FileCheck %s --check-prefix=PROB90
; RUN: %p2i -i %s -a -rng-seed=1 -nop-insertion -nop-insertion-percentage=50 \
; RUN: %p2i -i %s -a -sz-seed=1 -nop-insertion -nop-insertion-percentage=50 \
; RUN: -max-nops-per-instruction=2 -integrated-as=false \
; RUN: | FileCheck %s --check-prefix=MAXNOPS2
......
......@@ -33,7 +33,7 @@ define float @undef_float() {
entry:
ret float undef
; CHECK-LABEL: undef_float
; CHECK: fld dword ptr [4]
; CHECK: fld dword ptr [.L$float$0]
}
define <4 x i1> @undef_v4i1() {
......@@ -186,7 +186,7 @@ entry:
%val = insertelement <4 x float> %arg, float undef, i32 0
ret <4 x float> %val
; CHECK-LABEL: vector_insertelement_arg2
; CHECK: movss {{.*}}, dword ptr [4]
; CHECK: movss {{.*}}, dword ptr [.L$float$0]
}
define float @vector_extractelement_v4f32_index_0() {
......
; This tests the basic structure of the Unreachable instruction.
; TODO(jvoung): fix extra "CALLTARGETS" run. The llvm-objdump symbolizer
; doesn't know how to symbolize non-section-local functions.
; The newer LLVM 3.6 one does work, but watch out for other bugs.
; RUN: %p2i -i %s -a -O2 --verbose none \
; RUN: | FileCheck --check-prefix=CALLTARGETS %s
; RUN: %p2i -i %s -a -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
......@@ -27,10 +21,8 @@ return: ; preds = %entry
}
; CHECK-LABEL: divide
; CALLTARGETS-LABEL: divide
; CHECK: cmp
; CHECK: call -4
; CALLTARGETS: .long ice_unreachable
; CHECK: call ice_unreachable
; CHECK: cdq
; CHECK: idiv
; CHECK: ret
......@@ -167,7 +167,7 @@ entry:
; CHECK: movups xmm1, xmmword ptr [esp + 112]
; CHECK: movups xmm2, xmmword ptr [esp + 96]
; CHECK: movups xmm3, xmmword ptr [esp + 80]
; CHECK: call -4
; CHECK: call VectorArgs
; CHECK-NEXT: add esp, 32
; OPTM1-LABEL: test_passing_vectors:
......@@ -180,7 +180,7 @@ entry:
; OPTM1: movups xmm1, xmmword ptr {{.*}}
; OPTM1: movups xmm2, xmmword ptr {{.*}}
; OPTM1: movups xmm3, xmmword ptr {{.*}}
; OPTM1: call -4
; OPTM1: call VectorArgs
; OPTM1-NEXT: add esp, 32
}
......@@ -203,7 +203,7 @@ entry:
; CHECK: movups xmm1, xmmword ptr [esp + 160]
; CHECK: movups xmm2, xmmword ptr [esp + 144]
; CHECK: movups xmm3, xmmword ptr [esp + 128]
; CHECK: call -4
; CHECK: call InterspersedVectorArgs
; CHECK-NEXT: add esp, 80
; CHECK: ret
......@@ -217,7 +217,7 @@ entry:
; OPTM1: movups xmm1, xmmword ptr {{.*}}
; OPTM1: movups xmm2, xmmword ptr {{.*}}
; OPTM1: movups xmm3, xmmword ptr {{.*}}
; OPTM1: call -4
; OPTM1: call InterspersedVectorArgs
; OPTM1-NEXT: add esp, 80
; OPTM1: ret
}
......@@ -233,15 +233,15 @@ entry:
%result2 = call <4 x float> @VectorReturn(<4 x float> %result)
ret void
; CHECK-LABEL: test_receiving_vectors:
; CHECK: call -4
; CHECK: call VectorReturn
; CHECK-NOT: movups xmm0
; CHECK: call -4
; CHECK: call VectorReturn
; CHECK: ret
; OPTM1-LABEL: test_receiving_vectors:
; OPTM1: call -4
; OPTM1: call VectorReturn
; OPTM1: movups {{.*}}, xmm0
; OPTM1: movups xmm0, {{.*}}
; OPTM1: call -4
; OPTM1: call VectorReturn
; OPTM1: ret
}
; This test checks support for vector arithmetic.
; TODO(jvoung): fix extra "CALLTARGETS" run. The llvm-objdump symbolizer
; doesn't know how to symbolize non-section-local functions.
; The newer LLVM 3.6 one does work, but watch out for other bugs.
; RUN: %p2i -i %s -a -O2 --verbose none\
; RUN: | FileCheck --check-prefix=CALLTARGETS %s
; RUN: %p2i -i %s -a -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
......@@ -58,15 +52,10 @@ entry:
%res = frem <4 x float> %arg0, %arg1
ret <4 x float> %res
; CHECK-LABEL: test_frem:
; CALLTARGETS-LABEL: test_frem:
; CHECK: -4
; CHECK: -4
; CHECK: -4
; CHECK: -4
; CALLTARGETS: fmodf
; CALLTARGETS: fmodf
; CALLTARGETS: fmodf
; CALLTARGETS: fmodf
; CHECK: fmodf
; CHECK: fmodf
; CHECK: fmodf
; CHECK: fmodf
}
define <16 x i8> @test_add_v16i8(<16 x i8> %arg0, <16 x i8> %arg1) {
......
; This file tests bitcasts of vector type. For most operations, these
; should be lowered to a no-op on -O2.
; TODO(jvoung): fix extra "CALLTARGETS" run. The llvm-objdump symbolizer
; doesn't know how to symbolize non-section-local functions.
; The newer LLVM 3.6 one does work, but watch out for other bugs.
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | FileCheck --check-prefix=CALLTARGETS %s
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN: | llvm-objdump -d -symbolize -x86-asm-syntax=intel - | FileCheck %s
......@@ -165,9 +159,7 @@ entry:
ret i8 %res
; CHECK-LABEL: test_bitcast_v8i1_to_i8:
; CALLTARGETS-LABEL: test_bitcast_v8i1_to_i8:
; CHECK: call -4
; CALLTARGETS: .long Sz_bitcast_v8i1_to_i8
; CHECK: call Sz_bitcast_v8i1_to_i8
; OPTM1-LABEL: test_bitcast_v8i1_to_i8:
; OPMT1: call -4
......@@ -179,9 +171,7 @@ entry:
ret i16 %res
; CHECK-LABEL: test_bitcast_v16i1_to_i16:
; CALLTARGETS-LABEL: test_bitcast_v16i1_to_i16:
; CHECK: call -4
; CALLTARGETS: .long Sz_bitcast_v16i1_to_i16
; CHECK: call Sz_bitcast_v16i1_to_i16
; OPTM1-LABEL: test_bitcast_v16i1_to_i16:
; OPMT1: call -4
......@@ -194,12 +184,10 @@ entry:
ret <8 x i1> %res
; CHECK-LABEL: test_bitcast_i8_to_v8i1:
; CALLTARGETS-LABEL: test_bitcast_i8_to_v8i1
; CHECK: call -4
; CALLTARGETS: .long Sz_bitcast_i8_to_v8i1
; CHECK: call Sz_bitcast_i8_to_v8i1
; OPTM1-LABEL: test_bitcast_i8_to_v8i1:
; OPTM1: call -4
; OPTM1: call Sz_bitcast_i8_to_v8i1
}
define <16 x i1> @test_bitcast_i16_to_v16i1(i32 %arg) {
......@@ -209,10 +197,8 @@ entry:
ret <16 x i1> %res
; CHECK-LABEL: test_bitcast_i16_to_v16i1:
; CALLTARGETS-LABEL: test_bitcast_i16_to_v16i1
; CHECK: call -4
; CALLTARGETS: .long Sz_bitcast_i16_to_v16i1
; CHECK: call Sz_bitcast_i16_to_v16i1
; OPTM1-LABEL: test_bitcast_i16_to_v16i1:
; OPTM1: call -4
; OPTM1: call Sz_bitcast_i16_to_v16i1
}
; This file tests casting / conversion operations that apply to vector types.
; bitcast operations are in vector-bitcast.ll.
; TODO(jvoung): fix extra "CALLTARGETS" run. The llvm-objdump symbolizer
; doesn't know how to symbolize non-section-local functions.
; The newer LLVM 3.6 one does work, but watch out for other bugs.
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | FileCheck --check-prefix=CALLTARGETS %s
; RUN: %p2i -i %s --args -O2 --verbose none \
; RUN: | llvm-mc -triple=i686-none-nacl -filetype=obj \
; RUN: | llvm-objdump -d --symbolize -x86-asm-syntax=intel - | FileCheck %s
......@@ -143,9 +137,7 @@ entry:
ret <4 x i32> %res
; CHECK-LABEL: test_fptoui_v4f32_to_v4i32:
; CHECK: call -4
; CALLTARGETS-LABEL: test_fptoui_v4f32_to_v4i32
; CALLTARGETS: .long Sz_fptoui_v4f32
; CHECK: call Sz_fptoui_v4f32
}
; [su]itofp operations
......@@ -165,7 +157,5 @@ entry:
ret <4 x float> %res
; CHECK-LABEL: test_uitofp_v4i32_to_v4f32:
; CHECK: call -4
; CALLTARGETS-LABEL: test_uitofp_v4i32_to_v4f32
; CALLTARGETS: .long Sz_uitofp_v4i32
; CHECK: call Sz_uitofp_v4i32
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment