Commit 673a7fe5 by Nicolas Capens Committed by Nicolas Capens

Unify load/store operand accessors

Load and store instructions, as well as intrinsics which access memory, can now share the same methods for accessing the memory address and data operands. Note that while this change introduces the potential for non-load/store instructions to have their operands accessed through getLoadAddress(), getStoreAddress(), or getData(), that risk isn't any greater than using the wrong getSrc() index, and would stick out as a mistake much more clearly. The advantage this change brings is that we no longer have to remember where the address and data operands are stored in sub-vector load/store intrinsics. In addition, there are no more overly verbose casts, and their cost is eliminated. Bug: b/179497998 Change-Id: I0d9208555e00b0d3053f7d3baca241fef2b8cbeb Reviewed-on: https://swiftshader-review.googlesource.com/c/SwiftShader/+/52531 Presubmit-Ready: Nicolas Capens <nicolascapens@google.com> Tested-by: Nicolas Capens <nicolascapens@google.com> Reviewed-by: Antonio Maiorano <amaiorano@google.com>
parent 8d50b556
......@@ -41,9 +41,6 @@ private:
static const Ice::InstIntrinsic *asStoreSubVector(const Ice::Inst *instruction);
static bool isLoad(const Ice::Inst &instruction);
static bool isStore(const Ice::Inst &instruction);
static Ice::Operand *storeAddress(const Ice::Inst *instruction);
static Ice::Operand *loadAddress(const Ice::Inst *instruction);
static Ice::Operand *storeData(const Ice::Inst *instruction);
static std::size_t storeSize(const Ice::Inst *instruction);
static bool loadTypeMatchesStore(const Ice::Inst *load, const Ice::Inst *store);
......@@ -64,7 +61,7 @@ private:
{
LoadStoreInst(Ice::Inst *inst, bool isStore)
: inst(inst)
, address(isStore ? storeAddress(inst) : loadAddress(inst))
, address(isStore ? inst->getStoreAddress() : inst->getLoadAddress())
, isStore(isStore)
{
}
......@@ -233,7 +230,7 @@ void Optimizer::eliminateLoadsFollowingSingleStore()
if(addressUses.stores.size() == 1)
{
Ice::Inst *store = addressUses.stores[0];
Ice::Operand *storeValue = storeData(store);
Ice::Operand *storeValue = store->getData();
for(Ice::Inst *load = &*++store->getIterator(), *next = nullptr; load != next; next = load, load = &*++store->getIterator())
{
......@@ -242,7 +239,7 @@ void Optimizer::eliminateLoadsFollowingSingleStore()
continue;
}
if(loadAddress(load) != address)
if(load->getLoadAddress() != address)
{
continue;
}
......@@ -412,7 +409,7 @@ void Optimizer::optimizeStoresInSingleBasicBlock()
}
store = inst;
storeValue = storeData(store);
storeValue = store->getData();
unmatchedLoads = false;
}
else
......@@ -556,7 +553,7 @@ bool Optimizer::isDead(Ice::Inst *instruction)
}
else if(isStore(*instruction))
{
if(Ice::Variable *address = llvm::dyn_cast<Ice::Variable>(storeAddress(instruction)))
if(Ice::Variable *address = llvm::dyn_cast<Ice::Variable>(instruction->getStoreAddress()))
{
Ice::Inst *def = getDefinition(address);
......@@ -582,7 +579,7 @@ const Ice::InstIntrinsic *Optimizer::asLoadSubVector(const Ice::Inst *instructio
{
if(auto *instrinsic = llvm::dyn_cast<Ice::InstIntrinsic>(instruction))
{
if(instrinsic->getIntrinsicInfo().ID == Ice::Intrinsics::LoadSubVector)
if(instrinsic->getIntrinsicID() == Ice::Intrinsics::LoadSubVector)
{
return instrinsic;
}
......@@ -595,7 +592,7 @@ const Ice::InstIntrinsic *Optimizer::asStoreSubVector(const Ice::Inst *instructi
{
if(auto *instrinsic = llvm::dyn_cast<Ice::InstIntrinsic>(instruction))
{
if(instrinsic->getIntrinsicInfo().ID == Ice::Intrinsics::StoreSubVector)
if(instrinsic->getIntrinsicID() == Ice::Intrinsics::StoreSubVector)
{
return instrinsic;
}
......@@ -624,57 +621,6 @@ bool Optimizer::isStore(const Ice::Inst &instruction)
return asStoreSubVector(&instruction) != nullptr;
}
// Returns the address operand of a store-like instruction.
// A plain InstStore keeps the address in getStoreAddress() (Src[1]),
// while the StoreSubVector intrinsic keeps it in getSrc(1).
// The assert guards against non-store callers; the trailing nullptr is
// unreachable for any instruction that satisfies isStore().
Ice::Operand *Optimizer::storeAddress(const Ice::Inst *instruction)
{
assert(isStore(*instruction));
if(auto *store = llvm::dyn_cast<Ice::InstStore>(instruction))
{
return store->getStoreAddress();
}
if(auto *storeSubVector = asStoreSubVector(instruction))
{
return storeSubVector->getSrc(1);
}
return nullptr;
}
// Returns the address operand of a load-like instruction.
// A plain InstLoad keeps the address in getLoadAddress() (Src[0]),
// while the LoadSubVector intrinsic keeps it in getSrc(0).
// The assert guards against non-load callers; the trailing nullptr is
// unreachable for any instruction that satisfies isLoad().
Ice::Operand *Optimizer::loadAddress(const Ice::Inst *instruction)
{
assert(isLoad(*instruction));
if(auto *load = llvm::dyn_cast<Ice::InstLoad>(instruction))
{
return load->getLoadAddress();
}
if(auto *loadSubVector = asLoadSubVector(instruction))
{
return loadSubVector->getSrc(0);
}
return nullptr;
}
// Returns the value operand being stored by a store-like instruction.
// Both a plain InstStore and the StoreSubVector intrinsic keep the data
// in Src[0] (getData() / getSrc(0) respectively).
// The assert guards against non-store callers; the trailing nullptr is
// unreachable for any instruction that satisfies isStore().
Ice::Operand *Optimizer::storeData(const Ice::Inst *instruction)
{
assert(isStore(*instruction));
if(auto *store = llvm::dyn_cast<Ice::InstStore>(instruction))
{
return store->getData();
}
if(auto *storeSubVector = asStoreSubVector(instruction))
{
return storeSubVector->getSrc(0);
}
return nullptr;
}
std::size_t Optimizer::storeSize(const Ice::Inst *store)
{
assert(isStore(*store));
......@@ -700,28 +646,24 @@ bool Optimizer::loadTypeMatchesStore(const Ice::Inst *load, const Ice::Inst *sto
}
assert(isLoad(*load) && isStore(*store));
assert(loadAddress(load) == storeAddress(store));
assert(load->getLoadAddress() == store->getStoreAddress());
if(auto *instStore = llvm::dyn_cast<Ice::InstStore>(store))
if(store->getData()->getType() != load->getDest()->getType())
{
if(auto *instLoad = llvm::dyn_cast<Ice::InstLoad>(load))
{
return instStore->getData()->getType() == instLoad->getDest()->getType();
}
return false;
}
if(auto *storeSubVector = asStoreSubVector(store))
{
if(auto *loadSubVector = asLoadSubVector(load))
{
// Check for matching type and sub-vector width.
return storeSubVector->getSrc(0)->getType() == loadSubVector->getDest()->getType() &&
llvm::cast<Ice::ConstantInteger32>(storeSubVector->getSrc(2))->getValue() ==
llvm::cast<Ice::ConstantInteger32>(loadSubVector->getSrc(1))->getValue();
// Check for matching sub-vector width.
return llvm::cast<Ice::ConstantInteger32>(storeSubVector->getSrc(2))->getValue() ==
llvm::cast<Ice::ConstantInteger32>(loadSubVector->getSrc(1))->getValue();
}
}
return false;
return true;
}
Optimizer::Uses *Optimizer::getUses(Ice::Operand *operand)
......@@ -797,14 +739,14 @@ void Optimizer::Uses::insert(Ice::Operand *value, Ice::Inst *instruction)
if(isLoad(*instruction))
{
if(value == loadAddress(instruction))
if(value == instruction->getLoadAddress())
{
loads.push_back(instruction);
}
}
else if(isStore(*instruction))
{
if(value == storeAddress(instruction))
if(value == instruction->getStoreAddress())
{
stores.push_back(instruction);
}
......
......@@ -113,6 +113,11 @@ public:
assert(!isDeleted());
Srcs[Index] = Replacement;
}
// Unified operand accessors for memory-accessing instructions.
// Instructions which load data (InstLoad, and the LoadSubVector
// intrinsic) take their address in Src[0], while store instructions
// (InstStore, and the StoreSubVector intrinsic) use Src[1] for the
// address and Src[0] for the data.
// NOTE: these perform no kind check — calling them on a non-load/store
// instruction silently returns an unrelated operand. Callers are
// expected to have established the instruction kind first (e.g. via
// isLoad()/isStore() checks or dyn_cast).
Operand *getLoadAddress() const { return getSrc(0); }
Operand *getStoreAddress() const { return getSrc(1); }
Operand *getData() const { return getSrc(0); }
bool isLastUse(const Operand *Src) const;
void spliceLivenessInfo(Inst *OrigInst, Inst *SpliceAssn);
......@@ -630,6 +635,7 @@ public:
}
// Full intrinsic descriptor (ID plus attribute flags such as IsMemoryWrite).
Intrinsics::IntrinsicInfo getIntrinsicInfo() const { return Info; }
// Convenience accessor for just the ID — avoids the verbose
// getIntrinsicInfo().ID at the many call sites that only switch on it.
Intrinsics::IntrinsicID getIntrinsicID() const { return Info.ID; }
bool isMemoryWrite() const override {
return getIntrinsicInfo().IsMemoryWrite;
}
......@@ -655,7 +661,6 @@ public:
(void)Align;
return new (Func->allocate<InstLoad>()) InstLoad(Func, Dest, SourceAddr);
}
Operand *getLoadAddress() const { return getSrc(0); }
bool isMemoryWrite() const override { return false; }
void dump(const Cfg *Func) const override;
static bool classof(const Inst *Instr) { return Instr->getKind() == Load; }
......@@ -761,8 +766,6 @@ public:
(void)Align;
return new (Func->allocate<InstStore>()) InstStore(Func, Data, Addr);
}
Operand *getStoreAddress() const { return getSrc(1); }
Operand *getData() const { return getSrc(0); }
Variable *getRmwBeacon() const;
void setRmwBeacon(Variable *Beacon);
bool isMemoryWrite() const override { return true; }
......
......@@ -366,9 +366,9 @@ void TargetLowering::doAddressOpt() {
doAddressOptStore();
else if (auto *Intrinsic =
llvm::dyn_cast<InstIntrinsic>(&*Context.getCur())) {
if (Intrinsic->getIntrinsicInfo().ID == Intrinsics::LoadSubVector)
if (Intrinsic->getIntrinsicID() == Intrinsics::LoadSubVector)
doAddressOptLoadSubVector();
else if (Intrinsic->getIntrinsicInfo().ID == Intrinsics::StoreSubVector)
else if (Intrinsic->getIntrinsicID() == Intrinsics::StoreSubVector)
doAddressOptStoreSubVector();
}
Context.advanceCur();
......
......@@ -723,7 +723,7 @@ void TargetARM32::genTargetHelperCallFor(Inst *Instr) {
case Inst::Intrinsic: {
Variable *Dest = Instr->getDest();
auto *Intrinsic = llvm::cast<InstIntrinsic>(Instr);
Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicInfo().ID;
Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicID();
switch (ID) {
default:
return;
......@@ -5016,7 +5016,7 @@ void TargetARM32::postambleCtpop64(const InstCall *Instr) {
void TargetARM32::lowerIntrinsic(const InstIntrinsic *Instr) {
Variable *Dest = Instr->getDest();
Type DestTy = (Dest != nullptr) ? Dest->getType() : IceType_void;
Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID;
Intrinsics::IntrinsicID ID = Instr->getIntrinsicID();
switch (ID) {
case Intrinsics::AtomicFence:
case Intrinsics::AtomicFenceAll:
......
......@@ -582,7 +582,7 @@ void TargetMIPS32::genTargetHelperCallFor(Inst *Instr) {
}
case Inst::Intrinsic: {
auto *Intrinsic = llvm::cast<InstIntrinsic>(Instr);
Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicInfo().ID;
Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicID();
if (isVectorType(DestTy) && ID == Intrinsics::Fabs) {
Operand *Src0 = Intrinsic->getArg(0);
GlobalString FabsFloat = Ctx->getGlobalString("llvm.fabs.f32");
......@@ -605,7 +605,8 @@ void TargetMIPS32::genTargetHelperCallFor(Inst *Instr) {
Context.insert<InstExtractElement>(Op, Src0, Index);
auto *Res = Func->makeVariable(IceType_f32);
Variable *DestT = Func->makeVariable(IceType_v4f32);
auto *Intrinsic = Context.insert<InstIntrinsic>(1, Res, CallTarget, Info);
auto *Intrinsic =
Context.insert<InstIntrinsic>(1, Res, CallTarget, Info);
Intrinsic->addArg(Op);
Context.insert<InstInsertElement>(DestT, T, Res, Index);
T = DestT;
......@@ -4566,7 +4567,7 @@ void TargetMIPS32::lowerIntrinsic(const InstIntrinsic *Instr) {
Variable *Dest = Instr->getDest();
Type DestTy = (Dest == nullptr) ? IceType_void : Dest->getType();
Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID;
Intrinsics::IntrinsicID ID = Instr->getIntrinsicID();
switch (ID) {
case Intrinsics::AtomicLoad: {
assert(isScalarIntegerType(DestTy));
......
......@@ -850,7 +850,7 @@ template <typename TraitsType> void TargetX86Base<TraitsType>::doLoadOpt() {
// An AtomicLoad intrinsic qualifies as long as it has a valid memory
// ordering, and can be implemented in a single instruction (i.e., not
// i64 on x86-32).
Intrinsics::IntrinsicID ID = Intrin->getIntrinsicInfo().ID;
Intrinsics::IntrinsicID ID = Intrin->getIntrinsicID();
if (ID == Intrinsics::AtomicLoad &&
(Traits::Is64Bit || Intrin->getDest()->getType() != IceType_i64) &&
Intrinsics::isMemoryOrderValid(
......@@ -4127,7 +4127,7 @@ void TargetX86Base<TraitsType>::lowerInsertElement(
template <typename TraitsType>
void TargetX86Base<TraitsType>::lowerIntrinsic(const InstIntrinsic *Instr) {
switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicInfo().ID) {
switch (Intrinsics::IntrinsicID ID = Instr->getIntrinsicID()) {
case Intrinsics::AtomicCmpxchg: {
if (!Intrinsics::isMemoryOrderValid(
ID, getConstantMemoryOrder(Instr->getArg(3)),
......@@ -7612,7 +7612,7 @@ void TargetX86Base<TraitsType>::genTargetHelperCallFor(Inst *Instr) {
} else if (auto *Intrinsic = llvm::dyn_cast<InstIntrinsic>(Instr)) {
CfgVector<Type> ArgTypes;
Type ReturnType = IceType_void;
switch (Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicInfo().ID) {
switch (Intrinsics::IntrinsicID ID = Intrinsic->getIntrinsicID()) {
default:
return;
case Intrinsics::Ctpop: {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment