Commit 86b60ef8 by Sagar Thakur Committed by Jim Stichnoth

[Subzero][MIPS32] Implements 64-bit shl, lshr, ashr for MIPS

R=stichnot@chromium.org Review URL: https://codereview.chromium.org/2359713003 . Patch from Sagar Thakur <sagar.thakur@imgtec.com>.
parent 0a7f99d9
......@@ -1149,7 +1149,7 @@ using InstMIPS32Mflo = InstMIPS32UnaryopGPR<InstMIPS32::Mflo>;
using InstMIPS32Mov_d = InstMIPS32TwoAddrFPR<InstMIPS32::Mov_d>;
using InstMIPS32Mov_s = InstMIPS32TwoAddrFPR<InstMIPS32::Mov_s>;
using InstMIPS32Movf = InstMIPS32MovConditional<InstMIPS32::Movf>;
using InstMIPS32Movn = InstMIPS32ThreeAddrGPR<InstMIPS32::Movn>;
using InstMIPS32Movn = InstMIPS32MovConditional<InstMIPS32::Movn>;
using InstMIPS32Movn_d = InstMIPS32ThreeAddrGPR<InstMIPS32::Movn_d>;
using InstMIPS32Movn_s = InstMIPS32ThreeAddrGPR<InstMIPS32::Movn_s>;
using InstMIPS32Movt = InstMIPS32MovConditional<InstMIPS32::Movt>;
......
......@@ -1730,30 +1730,22 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
Variable *Dest, Operand *Src0,
Operand *Src1) {
InstArithmetic::OpKind Op = Instr->getOp();
switch (Op) {
case InstArithmetic::Add:
case InstArithmetic::And:
case InstArithmetic::Or:
case InstArithmetic::Sub:
case InstArithmetic::Xor:
case InstArithmetic::Mul:
break;
default:
UnimplementedLoweringError(this, Instr);
return;
}
auto *DestLo = llvm::cast<Variable>(loOperand(Dest));
auto *DestHi = llvm::cast<Variable>(hiOperand(Dest));
Variable *Src0LoR = legalizeToReg(loOperand(Src0));
Variable *Src1LoR = legalizeToReg(loOperand(Src1));
Variable *Src0HiR = legalizeToReg(hiOperand(Src0));
Variable *Src1HiR = legalizeToReg(hiOperand(Src1));
Variable *Src0LoR = nullptr;
Variable *Src1LoR = nullptr;
Variable *Src0HiR = nullptr;
Variable *Src1HiR = nullptr;
switch (Op) {
case InstArithmetic::_num:
llvm::report_fatal_error("Unknown arithmetic operator");
return;
case InstArithmetic::Add: {
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
Src1HiR = legalizeToReg(hiOperand(Src1));
auto *T_Carry = I32Reg(), *T_Lo = I32Reg(), *T_Hi = I32Reg(),
*T_Hi2 = I32Reg();
_addu(T_Lo, Src0LoR, Src1LoR);
......@@ -1765,6 +1757,10 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
return;
}
case InstArithmetic::And: {
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
Src1HiR = legalizeToReg(hiOperand(Src1));
auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
_and(T_Lo, Src0LoR, Src1LoR);
_mov(DestLo, T_Lo);
......@@ -1773,6 +1769,10 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
return;
}
case InstArithmetic::Sub: {
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
Src1HiR = legalizeToReg(hiOperand(Src1));
auto *T_Borrow = I32Reg(), *T_Lo = I32Reg(), *T_Hi = I32Reg(),
*T_Hi2 = I32Reg();
_subu(T_Lo, Src0LoR, Src1LoR);
......@@ -1784,6 +1784,10 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
return;
}
case InstArithmetic::Or: {
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
Src1HiR = legalizeToReg(hiOperand(Src1));
auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
_or(T_Lo, Src0LoR, Src1LoR);
_mov(DestLo, T_Lo);
......@@ -1792,6 +1796,10 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
return;
}
case InstArithmetic::Xor: {
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
Src1HiR = legalizeToReg(hiOperand(Src1));
auto *T_Lo = I32Reg(), *T_Hi = I32Reg();
_xor(T_Lo, Src0LoR, Src1LoR);
_mov(DestLo, T_Lo);
......@@ -1802,6 +1810,10 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
case InstArithmetic::Mul: {
// TODO(rkotler): Make sure that mul has the side effect of clobbering
// LO, HI. Check for any other LO, HI quirkiness in this section.
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
Src1HiR = legalizeToReg(hiOperand(Src1));
auto *T_Lo = I32Reg(RegMIPS32::Reg_LO), *T_Hi = I32Reg(RegMIPS32::Reg_HI);
auto *T1 = I32Reg(), *T2 = I32Reg();
auto *TM1 = I32Reg(), *TM2 = I32Reg(), *TM3 = I32Reg(), *TM4 = I32Reg();
......@@ -1817,6 +1829,172 @@ void TargetMIPS32::lowerInt64Arithmetic(const InstArithmetic *Instr,
_mov(DestHi, TM4);
return;
}
case InstArithmetic::Shl: {
auto *T_Lo = I32Reg();
auto *T_Hi = I32Reg();
auto *T1_Lo = I32Reg();
auto *T1_Hi = I32Reg();
auto *T1 = I32Reg();
auto *T2 = I32Reg();
auto *T3 = I32Reg();
auto *T4 = I32Reg();
auto *T5 = I32Reg();
if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Src1)) {
Src0LoR = legalizeToReg(loOperand(Src0));
int64_t ShiftAmount = Const->getValue();
if (ShiftAmount == 1) {
Src0HiR = legalizeToReg(hiOperand(Src0));
_addu(T_Lo, Src0LoR, Src0LoR);
_sltu(T1, T_Lo, Src0LoR);
_addu(T2, T1, Src0HiR);
_addu(T_Hi, Src0HiR, T2);
} else if (ShiftAmount < INT32_BITS) {
Src0HiR = legalizeToReg(hiOperand(Src0));
_srl(T1, Src0LoR, INT32_BITS - ShiftAmount);
_sll(T2, Src0HiR, ShiftAmount);
_or(T_Hi, T1, T2);
_sll(T_Lo, Src0LoR, ShiftAmount);
} else if (ShiftAmount == INT32_BITS) {
_addiu(T_Lo, getZero(), 0);
_mov(T_Hi, Src0LoR);
} else if (ShiftAmount > INT32_BITS && ShiftAmount < 64) {
_sll(T_Hi, Src0LoR, ShiftAmount - INT32_BITS);
_addiu(T_Lo, getZero(), 0);
}
_mov(DestLo, T_Lo);
_mov(DestHi, T_Hi);
return;
}
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
_sllv(T1, Src0HiR, Src1LoR);
_not(T2, Src1LoR);
_srl(T3, Src0LoR, 1);
_srlv(T4, T3, T2);
_or(T_Hi, T1, T4);
_sllv(T_Lo, Src0LoR, Src1LoR);
_mov(T1_Hi, T_Hi);
_mov(T1_Lo, T_Lo);
_andi(T5, Src1LoR, INT32_BITS);
_movn(T1_Hi, T_Lo, T5);
_movn(T1_Lo, getZero(), T5);
_mov(DestHi, T1_Hi);
_mov(DestLo, T1_Lo);
return;
}
case InstArithmetic::Lshr: {
auto *T_Lo = I32Reg();
auto *T_Hi = I32Reg();
auto *T1_Lo = I32Reg();
auto *T1_Hi = I32Reg();
auto *T1 = I32Reg();
auto *T2 = I32Reg();
auto *T3 = I32Reg();
auto *T4 = I32Reg();
auto *T5 = I32Reg();
if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Src1)) {
Src0HiR = legalizeToReg(hiOperand(Src0));
int64_t ShiftAmount = Const->getValue();
if (ShiftAmount < INT32_BITS) {
Src0LoR = legalizeToReg(loOperand(Src0));
_sll(T1, Src0HiR, INT32_BITS - ShiftAmount);
_srl(T2, Src0LoR, ShiftAmount);
_or(T_Lo, T1, T2);
_srl(T_Hi, Src0HiR, ShiftAmount);
} else if (ShiftAmount == INT32_BITS) {
_mov(T_Lo, Src0HiR);
_addiu(T_Hi, getZero(), 0);
} else if (ShiftAmount > INT32_BITS && ShiftAmount < 64) {
_srl(T_Lo, Src0HiR, ShiftAmount - INT32_BITS);
_addiu(T_Hi, getZero(), 0);
}
_mov(DestLo, T_Lo);
_mov(DestHi, T_Hi);
return;
}
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
_srlv(T1, Src0LoR, Src1LoR);
_not(T2, Src1LoR);
_sll(T3, Src0HiR, 1);
_sllv(T4, T3, T2);
_or(T_Lo, T1, T4);
_srlv(T_Hi, Src0HiR, Src1LoR);
_mov(T1_Hi, T_Hi);
_mov(T1_Lo, T_Lo);
_andi(T5, Src1LoR, INT32_BITS);
_movn(T1_Lo, T_Hi, T5);
_movn(T1_Hi, getZero(), T5);
_mov(DestHi, T1_Hi);
_mov(DestLo, T1_Lo);
return;
}
case InstArithmetic::Ashr: {
auto *T_Lo = I32Reg();
auto *T_Hi = I32Reg();
auto *T1_Lo = I32Reg();
auto *T1_Hi = I32Reg();
auto *T1 = I32Reg();
auto *T2 = I32Reg();
auto *T3 = I32Reg();
auto *T4 = I32Reg();
auto *T5 = I32Reg();
auto *T6 = I32Reg();
if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Src1)) {
Src0HiR = legalizeToReg(hiOperand(Src0));
int64_t ShiftAmount = Const->getValue();
if (ShiftAmount < INT32_BITS) {
Src0LoR = legalizeToReg(loOperand(Src0));
_sll(T1, Src0HiR, INT32_BITS - ShiftAmount);
_srl(T2, Src0LoR, ShiftAmount);
_or(T_Lo, T1, T2);
_sra(T_Hi, Src0HiR, ShiftAmount);
} else if (ShiftAmount == INT32_BITS) {
_sra(T_Hi, Src0HiR, INT32_BITS - 1);
_mov(T_Lo, Src0HiR);
} else if (ShiftAmount > INT32_BITS && ShiftAmount < 64) {
_sra(T_Lo, Src0HiR, ShiftAmount - INT32_BITS);
_sra(T_Hi, Src0HiR, INT32_BITS - 1);
}
_mov(DestLo, T_Lo);
_mov(DestHi, T_Hi);
return;
}
Src0LoR = legalizeToReg(loOperand(Src0));
Src1LoR = legalizeToReg(loOperand(Src1));
Src0HiR = legalizeToReg(hiOperand(Src0));
_srlv(T1, Src0LoR, Src1LoR);
_not(T2, Src1LoR);
_sll(T3, Src0HiR, 1);
_sllv(T4, T3, T2);
_or(T_Lo, T1, T4);
_srav(T_Hi, Src0HiR, Src1LoR);
_mov(T1_Hi, T_Hi);
_mov(T1_Lo, T_Lo);
_andi(T5, Src1LoR, INT32_BITS);
_movn(T1_Lo, T_Hi, T5);
_sra(T6, Src0HiR, INT32_BITS - 1);
_movn(T1_Hi, T6, T5);
_mov(DestHi, T1_Hi);
_mov(DestLo, T1_Lo);
return;
}
default:
UnimplementedLoweringError(this, Instr);
return;
......@@ -3392,8 +3570,6 @@ void TargetMIPS32::prelowerPhis() {
void TargetMIPS32::postLower() {
if (Func->getOptLevel() == Opt_m1)
return;
// TODO(rkotler): Find two-address non-SSA instructions where Dest==Src0,
// and set the IsDestRedefined flag to keep liveness analysis consistent.
markRedefinitions();
Context.availabilityUpdate();
}
......
......@@ -520,6 +520,19 @@ entry:
; ARM32: lslge [[T2]], r0, [[T3]]
; ARM32: lsl r{{[0-9]+}}, r0, r2
; MIPS32-LABEL: shl64BitSigned
; MIPS32: sllv [[T1:.*]],[[A_HI:.*]],[[B_LO:.*]]
; MIPS32: nor [[T2:.*]],[[B_LO]],zero
; MIPS32: srl [[T3:.*]],[[A_LO:.*]],0x1
; MIPS32: srlv [[T4:.*]],[[T3]],[[T2]]
; MIPS32: or [[T_HI:.*]],[[T1]],[[T4]]
; MIPS32: sllv [[T_LO:.*]],[[A_LO]],[[B_LO]]
; MIPS32: move [[T1_LO:.*]],[[T_LO]]
; MIPS32: andi [[T5:.*]],[[B_LO]],0x20
; MIPS32: movn [[T_HI]],[[T_LO]],[[T5]]
; MIPS32: movn [[T1_LO]],zero,[[T5]]
; MIPS32: move v1,[[T_HI]]
define internal i32 @shl64BitSignedTrunc(i64 %a, i64 %b) {
entry:
%shl = shl i64 %a, %b
......@@ -567,6 +580,19 @@ entry:
; ARM32: lslge
; ARM32: lsl
; MIPS32-LABEL: shl64BitUnsigned
; MIPS32: sllv [[T1:.*]],[[A_HI:.*]],[[B_LO:.*]]
; MIPS32: nor [[T2:.*]],[[B_LO]],zero
; MIPS32: srl [[T3:.*]],[[A_LO:.*]],0x1
; MIPS32: srlv [[T4:.*]],[[T3]],[[T2]]
; MIPS32: or [[T_HI:.*]],[[T1]],[[T4]]
; MIPS32: sllv [[T_LO:.*]],[[A_LO]],[[B_LO]]
; MIPS32: move [[T1_LO:.*]],[[T_LO]]
; MIPS32: andi [[T5:.*]],[[B_LO]],0x20
; MIPS32: movn [[T_HI]],[[T_LO]],[[T5]]
; MIPS32: movn [[T1_LO]],zero,[[T5]]
; MIPS32: move v1,[[T_HI]]
define internal i64 @shr64BitSigned(i64 %a, i64 %b) {
entry:
%shr = ashr i64 %a, %b
......@@ -595,6 +621,19 @@ entry:
; ARM32: asrge r{{[0-9]+}}, r{{[0-9]+}}, [[T2]]
; ARM32: asr r{{[0-9]+}}, r{{[0-9]+}}, r{{[0-9]+}}
; MIPS32-LABEL: shr64BitSigned
; MIPS32: srlv [[T1:.*]],[[A_LO:.*]],[[B_LO:.*]]
; MIPS32: nor [[T2:.*]],[[B_LO]],zero
; MIPS32: sll [[T3:.*]],[[A_HI:.*]],0x1
; MIPS32: sllv [[T4:.*]],[[T3]],[[T2]]
; MIPS32: or [[T_LO:.*]],[[T1]],[[T4]]
; MIPS32: srav [[T_HI:.*]],[[A_HI]],[[B_LO]]
; MIPS32: move [[T_HI1:.*]],[[T_HI]]
; MIPS32: andi [[T5:.*]],[[B_LO]],0x20
; MIPS32: movn [[T_LO1:.*]],[[T_HI]],[[T5]]
; MIPS32: sra [[T6:.*]],[[A_HI]],0x1f
; MIPS32: movn [[T_HI1]],[[T6]],[[T5]]
define internal i32 @shr64BitSignedTrunc(i64 %a, i64 %b) {
entry:
%shr = ashr i64 %a, %b
......@@ -648,6 +687,18 @@ entry:
; ARM32: lsrge
; ARM32: lsr
; MIPS32-LABEL: shr64BitUnsigned
; MIPS32: srlv [[T1:.*]],[[A_LO:.*]],[[B_LO:.*]]
; MIPS32: nor [[T2:.*]],[[B_LO]],zero
; MIPS32: sll [[T3:.*]],[[A_HI:.*]],0x1
; MIPS32: sllv [[T4:.*]],[[T3]],[[T2]]
; MIPS32: or [[T_LO:.*]],[[T1]],[[T4]]
; MIPS32: srlv [[T_HI:.*]],[[A_HI]],[[B_LO]]
; MIPS32: move [[T_HI1:.*]],[[T_HI]]
; MIPS32: andi [[T5:.*]],[[B_LO]],0x20
; MIPS32: movn [[T_LO1:.*]],[[T_HI]],[[T5]]
; MIPS32: movn [[T_HI1]],zero,[[T5]]
define internal i32 @shr64BitUnsignedTrunc(i64 %a, i64 %b) {
entry:
%shr = lshr i64 %a, %b
......
......@@ -28,13 +28,13 @@
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target mips32 -i %s --args -O2 --skip-unimplemented \
; RUN: | %if --need=target_MIPS32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix MIPS32 %s
; RUN: --command FileCheck --check-prefix MIPS32-O2 --check-prefix MIPS32 %s
; RUN: %if --need=target_MIPS32 --need=allow_dump \
; RUN: --command %p2i --filetype=asm --assemble \
; RUN: --disassemble --target mips32 -i %s --args -Om1 --skip-unimplemented \
; RUN: | %if --need=target_MIPS32 --need=allow_dump \
; RUN: --command FileCheck --check-prefix MIPS32 %s
; RUN: --command FileCheck --check-prefix MIPS32-OM1 --check-prefix MIPS32 %s
@i1 = internal global [4 x i8] zeroinitializer, align 4
@i2 = internal global [4 x i8] zeroinitializer, align 4
......@@ -149,6 +149,11 @@ entry:
}
; CHECK-LABEL: shlImm64One
; CHECK: shl {{.*}},1
; MIPS32-LABEL: shlImm64One
; MIPS32: addu [[T_LO:.*]],[[VAL_LO:.*]],[[VAL_LO]]
; MIPS32: sltu [[T1:.*]],[[T_LO]],[[VAL_LO]]
; MIPS32: addu [[T2:.*]],[[T1]],[[VAL_HI:.*]]
; MIPS32: addu {{.*}},[[VAL_HI]],[[T2]]
define internal i64 @shlImm64LessThan32(i64 %val) {
entry:
......@@ -157,6 +162,11 @@ entry:
}
; CHECK-LABEL: shlImm64LessThan32
; CHECK: shl {{.*}},0x4
; MIPS32-LABEL: shlImm64LessThan32
; MIPS32: srl [[T1:.*]],[[VAL_LO:.*]],0x1c
; MIPS32: sll [[T2:.*]],{{.*}},0x4
; MIPS32: or {{.*}},[[T1]],[[T2]]
; MIPS32: sll {{.*}},[[VAL_LO]],0x4
define internal i64 @shlImm64Equal32(i64 %val) {
entry:
......@@ -165,6 +175,11 @@ entry:
}
; CHECK-LABEL: shlImm64Equal32
; CHECK-NOT: shl
; MIPS32-LABEL: shlImm64Equal32
; MIPS32: li {{.*}},0
; MIPS32-O2: move
; MIPS32-OM1: sw
; MIPS32-OM1: lw
define internal i64 @shlImm64GreaterThan32(i64 %val) {
entry:
......@@ -173,6 +188,9 @@ entry:
}
; CHECK-LABEL: shlImm64GreaterThan32
; CHECK: shl {{.*}},0x8
; MIPS32-LABEL: shlImm64GreaterThan32
; MIPS32: sll {{.*}},{{.*}},0x8
; MIPS32: li {{.*}},0
define internal i64 @lshrImm64One(i64 %val) {
entry:
......@@ -181,6 +199,11 @@ entry:
}
; CHECK-LABEL: lshrImm64One
; CHECK: shr {{.*}},1
; MIPS32-LABEL: lshrImm64One
; MIPS32: sll [[T1:.*]],[[VAL_HI:.*]],0x1f
; MIPS32: srl [[T2:.*]],{{.*}},0x1
; MIPS32: or {{.*}},[[T1]],[[T2]]
; MIPS32: srl {{.*}},[[VAL_HI]],0x1
define internal i64 @lshrImm64LessThan32(i64 %val) {
entry:
......@@ -190,6 +213,11 @@ entry:
; CHECK-LABEL: lshrImm64LessThan32
; CHECK: shrd {{.*}},0x4
; CHECK: shr {{.*}},0x4
; MIPS32-LABEL: lshrImm64LessThan32
; MIPS32: sll [[T1:.*]],[[VAL_HI:.*]],0x1c
; MIPS32: srl [[T2:.*]],{{.*}},0x4
; MIPS32: or {{.*}},[[T1]],[[T2]]
; MIPS32: srl {{.*}},[[VAL_HI]],0x4
define internal i64 @lshrImm64Equal32(i64 %val) {
entry:
......@@ -198,6 +226,11 @@ entry:
}
; CHECK-LABEL: lshrImm64Equal32
; CHECK-NOT: shr
; MIPS32-LABEL: lshrImm64Equal32
; MIPS32: li {{.*}},0
; MIPS32-O2: move
; MIPS32-OM1: sw
; MIPS32-OM1: lw
define internal i64 @lshrImm64GreaterThan32(i64 %val) {
entry:
......@@ -207,6 +240,9 @@ entry:
; CHECK-LABEL: lshrImm64GreaterThan32
; CHECK-NOT: shrd
; CHECK: shr {{.*}},0x8
; MIPS32-LABEL: lshrImm64GreaterThan32
; MIPS32: srl {{.*}},{{.*}},0x8
; MIPS32: li {{.*}},0
define internal i64 @ashrImm64One(i64 %val) {
entry:
......@@ -216,6 +252,11 @@ entry:
; CHECK-LABEL: ashrImm64One
; CHECK: shrd {{.*}},0x1
; CHECK: sar {{.*}},1
; MIPS32-LABEL: ashrImm64One
; MIPS32: sll [[T1:.*]],[[VAL_HI:.*]],0x1f
; MIPS32: srl [[T2:.*]],{{.*}},0x1
; MIPS32: or {{.*}},[[T1]],[[T2]]
; MIPS32: sra {{.*}},[[VAL_HI]],0x1
define internal i64 @ashrImm64LessThan32(i64 %val) {
entry:
......@@ -225,6 +266,11 @@ entry:
; CHECK-LABEL: ashrImm64LessThan32
; CHECK: shrd {{.*}},0x4
; CHECK: sar {{.*}},0x4
; MIPS32-LABEL: ashrImm64LessThan32
; MIPS32: sll [[T1:.*]],[[VAL_HI:.*]],0x1c
; MIPS32: srl [[T2:.*]],{{.*}},0x4
; MIPS32: or {{.*}},[[T1]],[[T2]]
; MIPS32: sra {{.*}},[[VAL_HI]],0x4
define internal i64 @ashrImm64Equal32(i64 %val) {
entry:
......@@ -234,6 +280,11 @@ entry:
; CHECK-LABEL: ashrImm64Equal32
; CHECK: sar {{.*}},0x1f
; CHECK-NOT: shrd
; MIPS32-LABEL: ashrImm64Equal32
; MIPS32: sra {{.*}},[[VAL_HI:.*]],0x1f
; MIPS32-O2: move {{.*}},[[VAL_HI]]
; MIPS32-OM1: sw [[VAL_HI]],{{.*}}
; MIPS32-OM1: lw {{.*}},{{.*}}
define internal i64 @ashrImm64GreaterThan32(i64 %val) {
entry:
......@@ -243,3 +294,6 @@ entry:
; CHECK-LABEL: ashrImm64GreaterThan32
; CHECK: sar {{.*}},0x1f
; CHECK: shrd {{.*}},0x8
; MIPS32-LABEL: ashrImm64GreaterThan32
; MIPS32: sra {{.*}},[[VAL_HI:.*]],0x8
; MIPS32: sra {{.*}},[[VAL_HI]],0x1f
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment