chromium/third_party/tflite/src/tensorflow/lite/kernels/lstm.cc

/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/lstm_eval.h"
#include "tensorflow/lite/kernels/lstm_shared.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace lstm {

// Per-op state shared between Prepare() and Eval(). Field layout follows the
// upstream TFLite kernel.
struct OpData {
  // Which kernel type to use: full kernel (24 inputs) or basic kernel
  // (5 inputs).
  TfLiteLSTMKernelType kernel_type;
  // True if this is the layer-normalized LSTM variant.
  bool use_layer_norm;
  // The fields below are only used by the full kernel.
  int scratch_tensor_index;
  lstm_eval::IntegerLstmParameter integer_lstm_param;
  bool compute_row_sums = false;
  // Row sums are cached for the hybrid and integer kernels.
  std::unique_ptr<int32_t[]> row_sums;
  int row_sums_size;
};

namespace full {
namespace {

// Named temporary tensors used by the hybrid (quantized-weight) path.
enum HybridTemporaryTensor {
  kScratchBuffer = 0,
  kInputQuantized = 1,
  kOutputStateQuantized = 2,
  kCellStateQuantized = 3,
  kInputScalingFactors = 4,
  kOutputStateScalingFactors = 5,
  kProductScalingFactors = 6,
  kRecoveredCellWeights = 7,
  kAccumScratch = 8,
  kInputZeroPoints = 9,
  kOutputStateZeroPoints = 10,
  kRowSums = 11,
  kNumHybridTemporaryTensors = 12,
};

// Sparse hybrid weights get one "ledger" temporary each, appended after the
// hybrid temporaries; the offsets below index them.
constexpr int kLedgersToAdd = 9;
constexpr int kInputToInputWeightsLedgerOffset = 0;
constexpr int kInputToForgetWeightsLedgerOffset = 1;
constexpr int kInputToCellWeightsLedgerOffset = 2;
constexpr int kInputToOutputWeightsLedgerOffset = 3;
constexpr int kRecurrentToInputWeightsLedgerOffset = 4;
constexpr int kRecurrentToForgetWeightsLedgerOffset = 5;
constexpr int kRecurrentToCellWeightsLedgerOffset = 6;
constexpr int kRecurrentToOutputWeightsLedgerOffset = 7;
constexpr int kProjectionWeightsLedgerOffset = 8;

// Sizes `ledger` (uint8, persistent arena allocation) to hold the block
// ledger derived from `sparsity`; a null `sparsity` is a no-op.
TfLiteStatus make_ledger(const TfLiteSparsity* sparsity, TfLiteContext* context,
                         TfLiteTensor* ledger) {}

// Fills `ledger` from the sparsity metadata: per block row, the number of
// non-zero blocks followed by their column indices.
TfLiteStatus copy_ledger(const TfLiteSparsity* sparsity, TfLiteTensor* ledger) {}
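
// A minimal sketch of the ledger encoding described above, assuming the
// CSR-style layout consumed by the sparse hybrid matmuls (this helper is
// hypothetical, shown only for illustration):
inline void AppendLedgerRow(const std::vector<uint8_t>& nonzero_block_columns,
                            std::vector<uint8_t>* ledger) {
  // Each row stripe begins with the count of non-zero blocks in the row...
  ledger->push_back(static_cast<uint8_t>(nonzero_block_columns.size()));
  // ...followed by the column index of each non-zero block.
  ledger->insert(ledger->end(), nonzero_block_columns.begin(),
                 nonzero_block_columns.end());
}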

// Derives the quantized multipliers, shifts, and zero points for the 8-bit
// activation / 16-bit gate ("8x8_16") integer kernel from the tensor scales.
TfLiteStatus PopulateQuantizedLstmParams8x8_16(
    TfLiteContext* context, TfLiteNode* node,
    lstm_eval::IntegerLstmParameter* integer_lstm_param) {}

// Same derivation for the fully 8-bit ("8x8_8") integer kernel.
TfLiteStatus PopulateQuantizedLstmParams8x8_8(
    TfLiteContext* context, TfLiteNode* node,
    lstm_eval::IntegerLstmParameter* integer_lstm_param) {}
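
// Each gate matmul is reduced to a fixed-point multiplier/shift pair. A
// minimal sketch of that step (hypothetical helper; the populate routines
// above apply it per weight tensor):
inline void EffectiveScaleToMultiplier(float input_scale, float weight_scale,
                                       float output_scale,
                                       int32_t* quantized_multiplier,
                                       int* shift) {
  // Real-valued scale of the int8 matmul relative to its output tensor.
  const double effective_scale =
      static_cast<double>(input_scale) * weight_scale / output_scale;
  // QuantizeMultiplier() splits the scale into a Q31 multiplier and a
  // power-of-two shift (see quantization_util.h).
  QuantizeMultiplier(effective_scale, quantized_multiplier, shift);
}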

}  // namespace

// Allocates the OpData and reserves scratch tensor indices for the
// temporaries used by the hybrid and integer paths.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {}

// LINT.IfChange
// Checks that the input tensor dimensions match each other.
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
                                        TfLiteNode* node, int n_input,
                                        int n_output, int n_cell,
                                        bool use_layer_norm, bool is_integer) {}
// LINT.ThenChange(//tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/lstm.cc)
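
// The checks follow the standard kernel pattern: validate each weight
// tensor's shape against n_input / n_output / n_cell with the TF_LITE_ENSURE
// macros. A minimal sketch (hypothetical helper; the real function repeats
// this pattern for every tensor):
static TfLiteStatus CheckWeightShape(TfLiteContext* context,
                                     const TfLiteTensor* weights, int rows,
                                     int cols) {
  TF_LITE_ENSURE(context, weights != nullptr);
  TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 2);
  TF_LITE_ENSURE_EQ(context, weights->dims->data[0], rows);
  TF_LITE_ENSURE_EQ(context, weights->dims->data[1], cols);
  return kTfLiteOk;
}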

// Precomputes output[r] = bias[r] + zero_point * sum_c(weight[r][c]) for each
// row of `weight_tensor`, so Eval() does not have to handle the activation
// zero point inside the matmul inner loop.
TfLiteStatus PrecomputeZeroPointTimesWeightWithBias(
    TfLiteContext* context, int32_t zero_point,
    const TfLiteTensor* weight_tensor, const TfLiteTensor* bias_tensor,
    std::unique_ptr<int32_t[]>* output) {}
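
// A scalar sketch of the precomputation above (illustrative helper; the
// kernel uses the vectorized tensor_utils routines instead):
static void ZeroPointTimesWeightPlusBias(int32_t zero_point,
                                         const int8_t* weight, int rows,
                                         int cols, const int32_t* bias,
                                         int32_t* output) {
  for (int r = 0; r < rows; ++r) {
    int32_t row_sum = 0;
    for (int c = 0; c < cols; ++c) {
      row_sum += weight[r * cols + c];
    }
    // A null bias is treated as zero, matching the optional-bias convention.
    output[r] = (bias != nullptr ? bias[r] : 0) + zero_point * row_sum;
  }
}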

// Runs the precomputation above for each input-to-gate and recurrent-to-gate
// weight tensor and caches the results in `op_data`.
TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context,
                                                       OpData* op_data,
                                                       TfLiteNode* node) {}

// Resizes the output and state tensors based on the sizes of the input
// tensors. Allocates temporary scratch tensors and checks that the sizes of
// the input tensors match each other.
// LINT.IfChange
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {}
// LINT.ThenChange(//tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/lstm.cc)
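
// Output resizing in Prepare() follows the standard TFLite pattern: build a
// TfLiteIntArray with the desired dims and hand ownership to the context. A
// minimal sketch (hypothetical helper with illustrative dims):
static TfLiteStatus ResizeToBatchOutput(TfLiteContext* context,
                                        TfLiteTensor* output, int n_batch,
                                        int n_output) {
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(2);
  output_size->data[0] = n_batch;
  output_size->data[1] = n_output;
  // ResizeTensor takes ownership of `output_size`.
  return context->ResizeTensor(context, output, output_size);
}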

// LINT.IfChange
// Dispatches on the weight tensor type: float weights run lstm_eval::EvalFloat,
// quantized weights with float activations run lstm_eval::EvalHybrid, and
// fully quantized graphs run the integer 8x8_16 / 8x8_8 paths.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {}
// LINT.ThenChange(//tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/lstm.cc)
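
// Shape of the Eval() dispatch (sketch; argument lists elided):
//
//   switch (input_to_output_weights->type) {
//     case kTfLiteFloat32:
//       return lstm_eval::EvalFloat(/* ...tensors and params... */);
//     case kTfLiteUInt8:
//     case kTfLiteInt8:
//       // Hybrid or fully integer path, depending on the activation type.
//       ...
//     default:
//       TF_LITE_KERNEL_LOG(context, "Type %s is not currently supported.",
//                          TfLiteTypeGetName(input_to_output_weights->type));
//       return kTfLiteError;
//   }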

}  // namespace full

// For the basic kernel (5 inputs).
namespace basic {

enum InputTensor {
  kInputData = 0,
  kInputPrevActivation = 1,
  kInputWeights = 2,
  kInputBiases = 3,
  kInputPrevState = 4,
  kInputNum = 5,
};

enum OutputTensor {
  kOutputActivation = 0,
  kOutputState = 1,
  kOutputConcatTemp = 2,
  kOutputActivationTemp = 3,
  kOutputNum = 4,
};

void* Init(TfLiteContext* context, const char* buffer, size_t length) {}

// Checks the five inputs / four outputs and resizes the temporaries used by
// the fused gate matmul.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {}

// Runs one step of the basic (non-projected, non-peephole) LSTM cell.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {}
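
// The basic kernel concatenates [input, prev_activation], multiplies by the
// fused weights, and splits the result into the four gates. A scalar sketch
// of the per-cell state update (illustrative; the kernel itself calls the
// optimized LstmCell routine):
static inline float SigmoidF(float x) { return 1.0f / (1.0f + std::exp(-x)); }

static inline void LstmCellScalar(float in_gate, float forget_gate,
                                  float cell_input, float out_gate,
                                  float prev_state, float* new_state,
                                  float* new_activation) {
  *new_state = SigmoidF(forget_gate) * prev_state +
               SigmoidF(in_gate) * std::tanh(cell_input);
  *new_activation = SigmoidF(out_gate) * std::tanh(*new_state);
}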

}  // namespace basic

// The entry points below dispatch to full:: or basic:: based on
// TfLiteLSTMParams::kernel_type.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {}

void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {}

}  // namespace lstm

TfLiteRegistration* Register_LSTM() {
  static TfLiteRegistration r = {lstm::Init, lstm::Free, lstm::Prepare,
                                 lstm::Eval};
  return &r;
}
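
// Typical client-side registration (illustrative):
//   tflite::MutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_LSTM,
//                       tflite::ops::builtin::Register_LSTM());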

}  // namespace builtin
}  // namespace ops
}  // namespace tflite