#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <memory>
#include <vector>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace mirror_pad {
namespace {
// Sentinel value indicating that the mirroring offset has not been set.
const int kUnsetOffset = …;
// Bundles the tensors and parameters needed by the templated Eval path.
template <typename T>
struct EvalData { … };
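// The members are elided above; a bundle like this would plausibly carry the
// input/output data pointers, the padding matrix, precomputed per-dimension
// element counts, and the mirroring offset (an assumption based on what the
// Eval path needs, not confirmed from this file).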
// Reads the left and right padding amounts for dimension `offset` from the
// raw padding data.
template <typename T>
inline void GetPadding(const T* data, int offset, int64_t* left_pad,
                       int64_t* right_pad) { … }
// Overload that reads the padding amounts for `dimension` from the padding
// matrix tensor, dispatching on the tensor's element type.
inline void GetPadding(const TfLiteTensor* padding_matrix, int dimension,
                       int64_t* left_pad, int64_t* right_pad) { … }
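// For reference (standard MirrorPad semantics, not specific to this file):
// the padding matrix is a [num_dims, 2] tensor whose row d holds
// {left_pad, right_pad} for dimension d. For example, a padding matrix of
// [[1, 1], [2, 2]] adds one mirrored row above and below the input and two
// mirrored columns on each side.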
// Returns the shape of the output tensor once `padding_matrix` has been
// applied to `input`.
std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)> GetPaddedOutputShape(
    const TfLiteTensor* input, const TfLiteTensor* padding_matrix) { … }
// Given an index along a padded output dimension and the left/right padding
// amounts, returns the corresponding index in the input dimension.
inline int GetInputDimension(int padded_dimension, int left_pad, int right_pad,
                             int input_dim_size, int offset) { … }
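// Worked example of the two mirroring modes on the 1-D input [1, 2, 3] with
// left_pad = 2 and right_pad = 2 (standard MirrorPad semantics):
//   REFLECT   (border element not repeated): [3, 2, 1, 2, 3, 2, 1]
//   SYMMETRIC (border element repeated):     [2, 1, 1, 2, 3, 3, 2]
// The `offset` argument is what distinguishes the two modes.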
// Maps a flat index into the padded output tensor to the flat index of the
// input element that should be copied there.
template <typename T>
int GetFlatIndex(int index, EvalData<T>* eval_data) { … }
// Threadpool task that evaluates a contiguous range of the output, so the
// work can be split across threads via cpu_backend_threadpool.
template <typename T>
struct MirrorPadWorkerTask : cpu_backend_threadpool::Task { … };
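// Tasks like this are normally dispatched with
// cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
// cpu_backend_context), each task copying a contiguous block of output
// elements; the exact split used here is elided above.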
}  // namespace
// Kernel entry point that fills the output tensor by mirroring the input
// according to the padding matrix and the op's mirroring mode.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { … }
// Standard TfLiteRegistration lifecycle hooks for per-node state.
void* Init(TfLiteContext* context, const char* buffer, size_t length) { … }
void Free(TfLiteContext* context, void* buffer) { … }
// Validates the node's inputs and determines the shape of the output tensor.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { … }
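// A common TFLite pattern for pad-like ops (assumed here rather than confirmed
// from the elided body): when the padding matrix is a constant tensor, Prepare
// resizes the output using GetPaddedOutputShape(); otherwise it calls
// SetTensorToDynamic() and defers the resize to Eval().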
}  // namespace mirror_pad
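// A typical body for a builtin-op registration function (a sketch of the
// usual pattern, not confirmed from this file) is:
//   static TfLiteRegistration r = {mirror_pad::Init, mirror_pad::Free,
//                                  mirror_pad::Prepare, mirror_pad::Eval};
//   return &r;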
// Returns the registration struct for the MIRROR_PAD builtin operator.
TfLiteRegistration* Register_MIRROR_PAD() { … }
}  // namespace builtin
}  // namespace ops
}  // namespace tflite