#ifndef TENSORFLOW_COMPILER_MLIR_LITE_TOOLS_OPTIMIZE_REDUCED_PRECISION_METADATA_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_TOOLS_OPTIMIZE_REDUCED_PRECISION_METADATA_H_
#include <cstdint>
#include <string>
#include <utility>
#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"
namespace tflite {
namespace optimize {
// Key under which the reduced-precision capability string is stored in the
// model's metadata table (initializer elided in this view).
// NOTE(review): header-scope `static constexpr` gives each TU its own copy;
// `inline constexpr` (C++17) would guarantee a single definition — confirm
// project convention before changing.
static constexpr char kTfLiteReducedPrecisionKey[] = …;
// Token strings presumably composed into the metadata value to name the
// supported inference/accumulation precisions — verify against
// MetadataForReducedPrecisionSupport below (values elided in this view).
static constexpr char kTfLiteFloat16String[] = …;
static constexpr char kTfLiteBfloat16String[] = …;
static constexpr char kTfLiteFloat32String[] = …;
static constexpr char kTfLiteAccumulationString[] = …;
enum class ReducedPrecisionSupport : std::uint8_t { … };
// Bitwise OR of two support masks: union of capabilities. Body elided in
// this view — presumably casts to the underlying integer, ORs, casts back
// (the standard enum-class bitmask idiom); confirm.
inline ReducedPrecisionSupport operator|(ReducedPrecisionSupport a,
ReducedPrecisionSupport b) { … }
// Compound OR-assign: folds b's capabilities into a, returning a for
// chaining. Body elided in this view.
inline ReducedPrecisionSupport& operator|=(ReducedPrecisionSupport& a,
ReducedPrecisionSupport b) { … }
// Bitwise AND of two support masks: intersection of capabilities; the
// predicates below presumably use this to test individual flags. Body
// elided in this view.
inline ReducedPrecisionSupport operator&(ReducedPrecisionSupport a,
ReducedPrecisionSupport b) { … }
// Compound AND-assign: restricts a to the capabilities also present in b,
// returning a for chaining. Body elided in this view.
inline ReducedPrecisionSupport& operator&=(ReducedPrecisionSupport& a,
ReducedPrecisionSupport b) { … }
inline bool SupportsFP16Inference(const ReducedPrecisionSupport& mask) { … }
inline bool SupportsBfloat16Inference(const ReducedPrecisionSupport& mask) { … }
inline bool SupportsFP16Accumulation(const ReducedPrecisionSupport& mask) { … }
inline bool SupportsFP32Accumulation(const ReducedPrecisionSupport& mask) { … }
// True if `mask` enables any reduced-precision inference mode — presumably
// fp16 OR bfloat16, mirroring the two inference predicates above; body
// elided in this view, confirm.
inline bool SupportsReducedPrecisionInference(
const ReducedPrecisionSupport& mask) { … }
// True if `mask` enables at least one accumulation mode (fp16 or fp32, per
// the name); body elided in this view.
inline bool SupportsEitherFP16OrFP32Accumulation(
const ReducedPrecisionSupport& mask) { … }
// Serializes `mask` into a {key, value} metadata entry — presumably
// {kTfLiteReducedPrecisionKey, concatenation of the precision token strings
// declared above}; body elided in this view, verify the value format against
// the implementation before depending on it.
inline std::pair<std::string, std::string> MetadataForReducedPrecisionSupport(
const ReducedPrecisionSupport& mask) { … }
}
}
#endif