// chromium/v8/src/compiler/revectorizer.cc

// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/revectorizer.h"

#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-observer.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/verifier.h"
#include "src/execution/isolate-inl.h"
#include "src/wasm/simd-shuffle.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...)

namespace {

#define SIMPLE_SIMD_OP

#define SIMD_SHIFT_OP

#define SIMD_SIGN_EXTENSION_CONVERT_OP

#define SIMD_SPLAT_OP

// Currently, only Load/ProtectedLoad/LoadTransform are supported.
// TODO(jiepan): add support for UnalignedLoad, LoadLane, LoadTrapOnNull
bool IsSupportedLoad(const Node* node) {
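  // A minimal sketch of the check described above; the exact opcode set is an
  // assumption, not verified against V8 trunk.
  return node->opcode() == IrOpcode::kLoad ||
         node->opcode() == IrOpcode::kProtectedLoad ||
         node->opcode() == IrOpcode::kLoadTransform;
}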

#ifdef DEBUG
bool IsSupportedLoad(const ZoneVector<Node*>& node_group) {
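  // Sketch: the debug-only group variant simply checks every member.
  for (Node* node : node_group) {
    if (!IsSupportedLoad(node)) return false;
  }
  return true;
}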
#endif

int64_t GetConstantValue(const Node* node) {
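  // Sketch, assuming offsets are built from Int64Constant nodes; -1 signals
  // "not a constant we understand".
  if (node->opcode() == IrOpcode::kInt64Constant) {
    return OpParameter<int64_t>(node->op());
  }
  return -1;
}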

int64_t GetMemoryOffsetValue(const Node* node) {
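  // Simplified sketch: input(0) is expected to be memory_start + offset (see
  // the comment below), so look for a constant summand of an Int64Add and
  // return -1 if none is found. The real code presumably handles more shapes.
  Node* base = node->InputAt(0);
  if (base->opcode() == IrOpcode::kInt64Add) {
    for (int i = 0; i < 2; ++i) {
      if (base->InputAt(i)->opcode() == IrOpcode::kInt64Constant) {
        return GetConstantValue(base->InputAt(i));
      }
    }
  }
  return -1;
}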

// We want to combine load/store nodes that access contiguous memory. For a
// load/store node, input(0) is memory_start + offset and input(1) is the
// index. We currently use the index as the address of the node; nodes with
// the same index and contiguous offsets can be combined.
Node* GetNodeAddress(const Node* node) {
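  // Sketch of the scheme described above: input(1) is the index we use as
  // the node's address. Unwrapping ChangeUint32ToUint64 (inserted when a
  // 32-bit index is widened) is an assumption.
  Node* address = node->InputAt(1);
  if (address->opcode() == IrOpcode::kChangeUint32ToUint64) {
    address = address->InputAt(0);
  }
  return address;
}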

bool IsContinuousAccess(const ZoneVector<Node*>& node_group) {
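  // Simplified sketch: consecutive nodes must be exactly one SIMD128 register
  // (kSimd128Size bytes) apart. LoadTransform nodes, which access fewer
  // bytes, would need a narrower stride and are not handled here.
  DCHECK_GT(node_group.size(), 0);
  int64_t previous_offset = GetMemoryOffsetValue(node_group[0]);
  for (size_t i = 1; i < node_group.size(); ++i) {
    int64_t current_offset = GetMemoryOffsetValue(node_group[i]);
    if (current_offset - previous_offset != kSimd128Size) return false;
    previous_offset = current_offset;
  }
  return true;
}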

// Returns true if all of the nodes in node_group are constants.
bool AllConstant(const ZoneVector<Node*>& node_group) {
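  // Sketch: defer to NodeProperties::IsConstant() for each member.
  for (Node* node : node_group) {
    if (!NodeProperties::IsConstant(node)) return false;
  }
  return true;
}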

// Returns true if the addresses of all nodes in |nodes| are identical.
bool AllSameAddress(const ZoneVector<Node*>& nodes) {
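  // Sketch: compare every node's address against the first one's.
  Node* address = GetNodeAddress(nodes[0]);
  for (size_t i = 1; i < nodes.size(); ++i) {
    if (GetNodeAddress(nodes[i]) != address) return false;
  }
  return true;
}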

// Returns true if all of the nodes in node_group are identical. The Splat
// opcode in Wasm SIMD creates a vector with identical lanes.
template <typename T>
bool IsSplat(const T& node_group) {
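  // Sketch: a group is a splat when every member is the same node.
  for (size_t i = 1; i < node_group.size(); ++i) {
    if (node_group[i] != node_group[0]) return false;
  }
  return true;
}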

// Some kinds of nodes (shuffle, s128const) have different operator instances
// even when they have the same properties, so we cannot simply compare the
// operators' addresses; we compare their opcodes and properties instead.
V8_INLINE static bool OperatorCanBePacked(const Operator* lhs,
                                          const Operator* rhs) {
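  // Direct translation of the comment above.
  return lhs->opcode() == rhs->opcode() &&
         lhs->properties() == rhs->properties();
}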

// Returns true if the operators of all nodes in node_group can be packed,
// i.e. they share an opcode and properties.
bool AllPackableOperator(const ZoneVector<Node*>& node_group) {
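  // Sketch: compare every operator against the first node's operator.
  for (size_t i = 1; i < node_group.size(); ++i) {
    if (!OperatorCanBePacked(node_group[i]->op(), node_group[0]->op())) {
      return false;
    }
  }
  return true;
}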

bool ShiftBySameScalar(const ZoneVector<Node*>& node_group) {
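  // Sketch, assuming SIMD shift nodes carry their scalar shift amount as
  // input(1): packing requires every node to shift by the same scalar.
  for (size_t i = 1; i < node_group.size(); ++i) {
    if (node_group[i]->InputAt(1) != node_group[0]->InputAt(1)) return false;
  }
  return true;
}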

bool IsSignExtensionOperation(IrOpcode::Value op) {}

bool MaybePackSignExtensionOp(const ZoneVector<Node*>& node_group) {}

class EffectChainIterator {};

void InsertAfter(EffectChainIterator& dest, EffectChainIterator& src) {}

}  // anonymous namespace

// Sorts load/store nodes by memory offset.
bool MemoryOffsetComparer::operator()(const Node* lhs, const Node* rhs) const {
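  return GetMemoryOffsetValue(lhs) < GetMemoryOffsetValue(rhs);
}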

void PackNode::Print() const {}

bool SLPTree::CanBePacked(const ZoneVector<Node*>& node_group) {}

PackNode* SLPTree::NewPackNode(const ZoneVector<Node*>& node_group) {}

PackNode* SLPTree::NewPackNodeAndRecurs(const ZoneVector<Node*>& node_group,
                                        int start_index, int count,
                                        unsigned recursion_depth) {}

PackNode* SLPTree::GetPackNode(Node* node) {}

void SLPTree::PushStack(const ZoneVector<Node*>& node_group) {}

void SLPTree::PopStack() {}

bool SLPTree::OnStack(Node* node) {}

bool SLPTree::AllOnStack(const ZoneVector<Node*>& node_group) {}

bool SLPTree::StackTopIsPhi() {}

void SLPTree::ClearStack() {}

// Try to connect the nodes in |loads| with consecutive effect edges. This
// allows us to build a |PackNode| without breaking effect dependencies; a
// sketch of the splice step follows the function.
// Before: [Load1]->...->[Load2]->...->[Load3]->...->[Load4]
// After:  [Load1]->[Load2]->[Load3]->[Load4]
void SLPTree::TryReduceLoadChain(const ZoneVector<Node*>& loads) {}
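// The helper below is purely illustrative: its name and shape are ours, not
// V8's. It shows the kind of effect-chain splice the diagram above implies,
// moving |load| so it directly follows |pred|, assuming a straight-line chain
// in which each node has one effect input and one effect use, and that the
// move has already been proven safe (see IsSideEffectFreeLoad below).
namespace {
void SpliceLoadAfter(Node* pred, Node* load) {
  Node* old_effect = NodeProperties::GetEffectInput(load);
  if (old_effect == pred) return;  // Already adjacent.
  // Returns the single effect use edge of |node| (assumed to exist).
  auto single_effect_use = [](Node* node) -> Edge {
    for (Edge edge : node->use_edges()) {
      if (NodeProperties::IsEffectEdge(edge)) return edge;
    }
    UNREACHABLE();
  };
  // [old_effect]->[load]->[user] becomes [old_effect]->[user].
  single_effect_use(load).UpdateTo(old_effect);
  // [pred]->[user'] becomes [pred]->[load]->[user'].
  single_effect_use(pred).UpdateTo(load);
  NodeProperties::ReplaceEffectInput(load, pred);
}
}  // namespace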

bool SLPTree::IsSideEffectFreeLoad(const ZoneVector<Node*>& node_group) {}

PackNode* SLPTree::BuildTree(const ZoneVector<Node*>& roots) {}

PackNode* SLPTree::BuildTreeRec(const ZoneVector<Node*>& node_group,
                                unsigned recursion_depth) {}

void SLPTree::DeleteTree() {}

void SLPTree::Print(const char* info) {}

template <typename FunctionType>
void SLPTree::ForEach(FunctionType callback) {}

//////////////////////////////////////////////////////

Revectorizer::Revectorizer(Zone* zone, Graph* graph, MachineGraph* mcgraph,
                           SourcePositionTable* source_positions)
    {}  // Member-initializer list elided.

bool Revectorizer::DecideVectorize() {}

void Revectorizer::SetEffectInput(PackNode* pnode, int index, Node*& input) {}

void Revectorizer::SetMemoryOpInputs(base::SmallVector<Node*, 2>& inputs,
                                     PackNode* pnode, int effect_index) {}

Node* Revectorizer::VectorizeTree(PackNode* pnode) {}

void Revectorizer::DetectCPUFeatures() {}

bool Revectorizer::TryRevectorize(const char* function) {}

void Revectorizer::UpdateSources() {}

void Revectorizer::CollectSeeds() {}

bool Revectorizer::ReduceStoreChains(
    ZoneMap<Node*, StoreNodeSet>* store_chains) {}

bool Revectorizer::ReduceStoreChain(const ZoneVector<Node*>& Stores) {}

void Revectorizer::PrintStores(ZoneMap<Node*, StoreNodeSet>* store_chains) {}

}  // namespace compiler
}  // namespace internal
}  // namespace v8