chromium/third_party/tflite/src/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc

/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"

#include <algorithm>
#include <array>
#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "xnnpack.h"  // from @XNNPACK
#include "Eigen/Core"  // from @eigen_archive
#include "pthreadpool.h"  // from @pthreadpool
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/xnnpack/quantization_util.h"
#include "tensorflow/lite/delegates/xnnpack/weight_cache.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/tools/optimize/reduced_precision_support.h"

struct TfLiteXNNPackDelegateWeightsCache;

namespace tflite {
namespace xnnpack {
namespace {

constexpr char kOdmlSDPA[] = "odml.scaled_dot_product_attention";

template <typename T>
void SafeCopyCustomData(const TfLiteNode& node, T* target) {}

void CopyTensorDataInt32OrInt64(int64_t* dst, const TfLiteTensor& tensor,
                                size_t n) {}

xnn_datatype GetXNNPackDatatype(TfLiteContext* context,
                                const TfLiteTensor& tensor, int t) {}

std::vector<size_t> TfLiteDimensionsToXNNPackDimensions(
    const std::vector<int>& tflite_dims) {}

// Forward declaration.
TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate);

// hash_combine from smhasher/boost.
template <typename T>
inline void hash_combine(size_t& seed, T v) {
  seed ^= std::hash<T>{}(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

struct PairHash {};

// Variable tensors are tensors that persist across graph invocations. A handle
// to a variable tensor is produced by the VAR_HANDLE operation; the result of
// this operation is a tensor of type kTfLiteResource, which represents the
// name/id of a variable tensor. READ_VARIABLE (RV) and ASSIGN_VARIABLE (AV)
// access variable tensors using the result of VAR_HANDLE.
// XNNPACK does not materialize any resource tensor. In order for RV/AV to know
// which variable tensor they are accessing, we track:
// - the name in each VAR_HANDLE node,
// - the output tensor of VAR_HANDLE in each Subgraph,
// - the input tensor of RV/AV in each Subgraph,
// and match these up.
// Each unique name is given a "global variable id". The output tensor of
// VAR_HANDLE is mapped to this global variable id using its name. The RV/AV
// input resource tensor id is then used to look up the global variable id,
// and with that we get a pointer to the underlying buffer.
// This is performed in two passes because:
// - XNNPACK requires tensors to be declared upfront, with fixed dimensions;
// - a VAR_HANDLE node carries no dimension information, only RV/AV does.
// The two passes are:
// - PrepareOpsToDelegate records a mapping from variable name to global
//   variable id and also records the dimensions based on RV/AV. This is called
//   per subgraph in the model.
// - Subgraph::Create actually defines the tensors. This is also called per
//   subgraph in the model.
// A minimal sketch of this bookkeeping follows the class declaration below.
class VariableHolder {};
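
// A minimal sketch of the two-pass bookkeeping described above. The class and
// member names below are illustrative assumptions, not the actual
// VariableHolder implementation (whose body is elided in this listing).
class VariableHolderSketch {
 public:
  // Pass 1 (PrepareOpsToDelegate): give each unique VAR_HANDLE name a dense
  // global variable id.
  uint32_t DefineVariable(const std::string& name) {
    const auto it = name_to_global_id_.find(name);
    if (it != name_to_global_id_.end()) return it->second;
    const uint32_t global_id =
        static_cast<uint32_t>(name_to_global_id_.size());
    name_to_global_id_.emplace(name, global_id);
    return global_id;
  }

  // Pass 1: remember which TFLite tensor holds the handle produced by
  // VAR_HANDLE, so that READ_VARIABLE/ASSIGN_VARIABLE inputs can be resolved.
  void AssociateTensorWithVariable(int tensor_id, uint32_t global_id) {
    tensor_id_to_global_id_[tensor_id] = global_id;
  }

  // Pass 1: record the dimensions observed on READ_VARIABLE/ASSIGN_VARIABLE,
  // since VAR_HANDLE itself carries no shape information.
  void SetDims(uint32_t global_id, std::vector<int> dims) {
    global_id_to_dims_[global_id] = std::move(dims);
  }

  // Pass 2 (Subgraph::Create) and runtime: resolve a resource tensor id back
  // to its global variable id.
  uint32_t GlobalId(int tensor_id) const {
    return tensor_id_to_global_id_.at(tensor_id);
  }

 private:
  std::unordered_map<std::string, uint32_t> name_to_global_id_;
  std::unordered_map<int, uint32_t> tensor_id_to_global_id_;
  std::unordered_map<uint32_t, std::vector<int>> global_id_to_dims_;
};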

class Subgraph;

class Delegate {};

class Subgraph {};

TfLiteIntArray* Delegate::PrepareOpsToDelegate(TfLiteContext* context) {}

void* SubgraphInit(TfLiteContext* context, const char* buffer, size_t length) {}

TfLiteStatus SubgraphPrepare(TfLiteContext* context, TfLiteNode* node) {}

TfLiteStatus SubgraphInvoke(TfLiteContext* context, TfLiteNode* node) {}

void SubgraphFree(TfLiteContext* context, void* buffer) {}

const TfLiteRegistration kSubgraphRegistration = {
    SubgraphInit, SubgraphFree, SubgraphPrepare, SubgraphInvoke};  // Other fields elided.

TfLiteStatus DelegatePrepare(TfLiteContext* context, TfLiteDelegate* delegate) {}

}  // namespace
}  // namespace xnnpack
}  // namespace tflite

TfLiteXNNPackDelegateWeightsCache* TfLiteXNNPackDelegateWeightsCacheCreate() {}

TfLiteXNNPackDelegateWeightsCache*
TfLiteXNNPackDelegateWeightsCacheCreateWithSize(size_t size) {}

bool TfLiteXNNPackDelegateWeightsCacheFinalizeSoft(
    TfLiteXNNPackDelegateWeightsCache* cache) {}

bool TfLiteXNNPackDelegateWeightsCacheFinalizeHard(
    TfLiteXNNPackDelegateWeightsCache* cache) {}

void TfLiteXNNPackDelegateWeightsCacheDelete(
    TfLiteXNNPackDelegateWeightsCache* cache) {}
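
// Illustrative flow for the weights cache API above. This is a sketch only:
// the `options.weights_cache` field is assumed from the public header, and
// attaching the delegate to an interpreter via ModifyGraphWithDelegate() is
// elided because the interpreter headers are not included here.
void ExampleSharedWeightsCacheUsage() {
  TfLiteXNNPackDelegateWeightsCache* cache =
      TfLiteXNNPackDelegateWeightsCacheCreate();

  TfLiteXNNPackDelegateOptions options = TfLiteXNNPackDelegateOptionsDefault();
  options.weights_cache = cache;

  // Create one delegate per interpreter that shares the same weights, and
  // apply each delegate to its interpreter (elided).
  TfLiteDelegate* delegate = TfLiteXNNPackDelegateCreate(&options);

  // Finalize the cache before running inference. Hard finalization releases
  // build-time memory but forbids creating further delegates with this cache;
  // soft finalization keeps the cache open for additional delegates.
  TfLiteXNNPackDelegateWeightsCacheFinalizeHard(cache);

  // ... run inference through the interpreter(s) ...

  // Tear down: delete the delegate(s) first, then the cache.
  TfLiteXNNPackDelegateDelete(delegate);
  TfLiteXNNPackDelegateWeightsCacheDelete(cache);
}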

TfLiteXNNPackDelegateOptions TfLiteXNNPackDelegateOptionsDefault() {}

TfLiteDelegate* TfLiteXNNPackDelegateCreate(
    const TfLiteXNNPackDelegateOptions* options) {}

TfLiteDelegate* TfLiteXNNPackDelegateCreateWithThreadpool(
    const TfLiteXNNPackDelegateOptions* options, TfLiteContext* context) {}

void* TfLiteXNNPackDelegateGetThreadPool(TfLiteDelegate* delegate) {}

const TfLiteXNNPackDelegateOptions* TfLiteXNNPackDelegateGetOptions(
    TfLiteDelegate* delegate) {}

int TfLiteXNNPackDelegateGetFlags(TfLiteDelegate* delegate) {}

void TfLiteXNNPackDelegateDelete(TfLiteDelegate* delegate) {}
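
// Illustrative creation and teardown of the delegate using only the C API
// declared in xnnpack_delegate.h (a sketch; attaching the delegate with
// Interpreter::ModifyGraphWithDelegate() is elided because the interpreter
// headers are not part of this file).
void ExampleCreateAndDestroyXNNPackDelegate() {
  TfLiteXNNPackDelegateOptions options = TfLiteXNNPackDelegateOptionsDefault();
  options.num_threads = 4;  // Size of the pthreadpool used for inference.

  TfLiteDelegate* delegate = TfLiteXNNPackDelegateCreate(&options);
  // ... pass `delegate` to Interpreter::ModifyGraphWithDelegate() and run ...

  // The delegate must outlive every interpreter it is attached to.
  TfLiteXNNPackDelegateDelete(delegate);
}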