chromium/third_party/tflite/src/tensorflow/compiler/mlir/lite/tools/optimize/reduced_precision_metadata.h

/* Copyright 2024 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_TOOLS_OPTIMIZE_REDUCED_PRECISION_METADATA_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_TOOLS_OPTIMIZE_REDUCED_PRECISION_METADATA_H_

#include <cstdint>
#include <string>
#include <utility>

#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"

namespace tflite {
namespace optimize {
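// Metadata key and value tokens used to describe reduced precision support
// in a TFLite model's metadata table.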
static constexpr char kTfLiteReducedPrecisionKey[] =
    "reduced_precision_support";

static constexpr char kTfLiteFloat16String[] = "fp16";
static constexpr char kTfLiteBfloat16String[] = "bf16";
static constexpr char kTfLiteFloat32String[] = "fp32";
static constexpr char kTfLiteAccumulationString[] = "acc";

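// Bitmask describing which reduced precision inference and accumulation
// modes are supported. Values are combined with the bitwise operators below.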
enum class ReducedPrecisionSupport : std::uint8_t {
  None = 0,
  Float16Inference = 0x1,
  Bfloat16Inference = 0x2,
  Float16Accumulation = 0x4,
  Float32Accumulation = 0x8,
};

inline ReducedPrecisionSupport operator|(ReducedPrecisionSupport a,
                                         ReducedPrecisionSupport b) {
  return static_cast<ReducedPrecisionSupport>(static_cast<std::uint32_t>(a) |
                                              static_cast<std::uint32_t>(b));
}

inline ReducedPrecisionSupport& operator|=(ReducedPrecisionSupport& a,
                                           ReducedPrecisionSupport b) {
  a = static_cast<ReducedPrecisionSupport>(static_cast<std::uint32_t>(a) |
                                           static_cast<std::uint32_t>(b));
  return a;
}

inline ReducedPrecisionSupport operator&(ReducedPrecisionSupport a,
                                         ReducedPrecisionSupport b) {
  return static_cast<ReducedPrecisionSupport>(static_cast<std::uint32_t>(a) &
                                              static_cast<std::uint32_t>(b));
}

inline ReducedPrecisionSupport& operator&=(ReducedPrecisionSupport& a,
                                           ReducedPrecisionSupport b) {
  a = static_cast<ReducedPrecisionSupport>(static_cast<std::uint32_t>(a) &
                                           static_cast<std::uint32_t>(b));
  return a;
}

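// Convenience predicates for testing individual capability bits in the mask.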
inline bool SupportsFP16Inference(const ReducedPrecisionSupport& mask) {
  return static_cast<bool>(mask & ReducedPrecisionSupport::Float16Inference);
}

inline bool SupportsBfloat16Inference(const ReducedPrecisionSupport& mask) {
  return static_cast<bool>(mask & ReducedPrecisionSupport::Bfloat16Inference);
}

inline bool SupportsFP16Accumulation(const ReducedPrecisionSupport& mask) {
  return static_cast<bool>(mask &
                           ReducedPrecisionSupport::Float16Accumulation);
}

inline bool SupportsFP32Accumulation(const ReducedPrecisionSupport& mask) {
  return static_cast<bool>(mask &
                           ReducedPrecisionSupport::Float32Accumulation);
}

inline bool SupportsReducedPrecisionInference(
    const ReducedPrecisionSupport& mask) {
  return SupportsFP16Inference(mask) || SupportsBfloat16Inference(mask);
}

inline bool SupportsEitherFP16OrFP32Accumulation(
    const ReducedPrecisionSupport& mask) {
  // Exactly one accumulation mode must be set, never both.
  return SupportsFP16Accumulation(mask) != SupportsFP32Accumulation(mask);
}

// Return the key-value pair for reduced precision support metadata.
// Example: mask = Float16Inference | Bfloat16Inference | Float32Accumulation;
// Returned value would be <"reduced_precision_support", "fp16bf16accfp32">.
inline std::pair<std::string, std::string> MetadataForReducedPrecisionSupport(
    const ReducedPrecisionSupport& mask) {
  TFLITE_DCHECK(SupportsReducedPrecisionInference(mask));
  TFLITE_DCHECK(SupportsEitherFP16OrFP32Accumulation(mask));
  std::string value = "";
  if (SupportsFP16Inference(mask)) {
    value += kTfLiteFloat16String;
  }
  if (SupportsBfloat16Inference(mask)) {
    value += kTfLiteBfloat16String;
  }
  value += kTfLiteAccumulationString;
  if (SupportsFP16Accumulation(mask)) {
    value += kTfLiteFloat16String;
  } else {
    value += kTfLiteFloat32String;
  }
  return std::make_pair(std::string(kTfLiteReducedPrecisionKey), value);
}
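
// Illustrative (hypothetical) caller code, following the example above:
//   ReducedPrecisionSupport mask =
//       ReducedPrecisionSupport::Float16Inference |
//       ReducedPrecisionSupport::Float32Accumulation;
//   auto metadata = MetadataForReducedPrecisionSupport(mask);
//   // metadata == {"reduced_precision_support", "fp16accfp32"}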

}  // namespace optimize
}  // namespace tflite

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_TOOLS_OPTIMIZE_REDUCED_PRECISION_METADATA_H_