// chromium/third_party/xnnpack/src/src/xnnpack/subgraph.h

// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <stddef.h>
#include <stdint.h>

#include "xnnpack.h"
#include "xnnpack/allocation-type.h"
#include "xnnpack/cache.h"
#include "xnnpack/common.h"
#include "xnnpack/math.h"
#include "xnnpack/node-type.h"

#if defined(EMSCRIPTEN)
#include <emscripten/emscripten.h>
#elif XNN_PLATFORM_WINDOWS
#include <windows.h>
#else
#include <time.h>
#endif

#define XNN_MAX_INPUTS
#define XNN_MAX_OUTPUTS

#define XNN_INVALID_NODE_ID

#define XNN_MAX_OPERATOR_OBJECTS

/// Disable fusion of nodes in the subgraph. Fusion is enabled by default; set this flag to turn it off.
#define XNN_FLAG_NO_OPERATOR_FUSION

#ifdef __cplusplus
extern "C" {
#endif

#ifdef XNN_SLINKY_ENABLED
struct xnn_value;
struct slinky_pipeline;
typedef struct slinky_pipeline* slinky_pipeline_t;
slinky_pipeline_t xnn_runtime_to_slinky_pipeline(xnn_runtime_t runtime);
void destroy_slinky_pipeline(slinky_pipeline_t pipeline);
enum xnn_status evaluate(
  slinky_pipeline_t p,
  struct xnn_value* const* input_values,
  size_t num_inputs,
  struct xnn_value* const* output_values,
  size_t num_outputs);
#endif
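
// Illustrative sketch (not part of the API contract) of the Slinky pipeline
// lifecycle declared above; it assumes `runtime` is a valid xnn_runtime_t and
// that the input/output xnn_value arrays were populated by the caller:
//
//   slinky_pipeline_t pipeline = xnn_runtime_to_slinky_pipeline(runtime);
//   enum xnn_status status =
//     evaluate(pipeline, inputs, num_inputs, outputs, num_outputs);
//   destroy_slinky_pipeline(pipeline);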

struct xnn_shape {};

enum xnn_value_type {};

enum xnn_layout_type {};

/// Abstraction for a collection of elements produced and consumed by nodes.
struct xnn_value {};


XNN_INLINE static bool xnn_value_is_external(const struct xnn_value* value) {}

XNN_INLINE static bool xnn_value_is_external_output(const struct xnn_value* value) {}

XNN_INLINE static bool xnn_value_is_external_input(const struct xnn_value* value) {}

XNN_INLINE static bool xnn_value_is_internal(const struct xnn_value* value) {}

XNN_INLINE static bool xnn_value_is_persistent(const struct xnn_value* value) {}

XNN_INLINE static bool xnn_value_is_valid(const struct xnn_value* value) {}

XNN_INLINE static bool xnn_value_is_static(const struct xnn_value* value) {}
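
// A minimal sketch of how the predicates above are typically implemented, in
// terms of the public XNN_VALUE_FLAG_EXTERNAL_INPUT and
// XNN_VALUE_FLAG_EXTERNAL_OUTPUT bits from xnnpack.h; the `flags` field is an
// assumption about the (elided) struct xnn_value layout:
//
//   XNN_INLINE static bool xnn_value_is_external_input(const struct xnn_value* value) {
//     return (value->flags & XNN_VALUE_FLAG_EXTERNAL_INPUT) != 0;
//   }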

struct xnn_node;
struct xnn_operator_data;

typedef enum xnn_status (*xnn_create_operator_fn)(/* parameters elided */);

typedef enum xnn_status (*xnn_reshape_operator_fn)(/* parameters elided */);

typedef enum xnn_status (*xnn_setup_operator_fn)(/* parameters elided */);

enum xnn_compute_type {};

struct xnn_node {};

#ifdef __MACH__
typedef uint64_t xnn_timestamp;
#elif __EMSCRIPTEN__
typedef double xnn_timestamp;
#elif XNN_PLATFORM_WINDOWS
typedef LARGE_INTEGER xnn_timestamp;
#else
typedef struct timespec xnn_timestamp;
#endif
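
// Sketch of how a timestamp of the platform-specific type above is typically
// captured; these are the standard OS clock APIs for each branch, not
// necessarily the exact calls used elsewhere in XNNPACK:
//
//   xnn_timestamp now;
//   #ifdef __MACH__
//     now = mach_absolute_time();            // <mach/mach_time.h>
//   #elif __EMSCRIPTEN__
//     now = emscripten_get_now();            // milliseconds, as a double
//   #elif XNN_PLATFORM_WINDOWS
//     QueryPerformanceCounter(&now);         // <windows.h>
//   #else
//     clock_gettime(CLOCK_MONOTONIC, &now);  // <time.h>
//   #endif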

struct xnn_operator_data {};

struct xnn_subgraph {};

/// Runtime is a combination of an execution plan for subgraph Nodes and a memory manager for subgraph Values.
struct xnn_runtime {};
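
// Hedged usage sketch of the runtime lifecycle via the public entry points in
// xnnpack.h (construction of `subgraph` and the external_values array is
// assumed to have happened elsewhere):
//
//   xnn_runtime_t runtime = NULL;
//   xnn_create_runtime_v2(subgraph, /*threadpool=*/NULL, /*flags=*/0, &runtime);
//   xnn_setup_runtime(runtime, num_external_values, external_values);
//   xnn_invoke_runtime(runtime);
//   xnn_delete_runtime(runtime);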

struct xnn_value* xnn_subgraph_new_internal_value(xnn_subgraph_t subgraph);

struct xnn_node* xnn_subgraph_new_node(xnn_subgraph_t subgraph);

enum xnn_status xnn_subgraph_add_nodes(xnn_subgraph_t subgraph, size_t num_nodes);
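
// Typical mutation-pass usage (sketch): allocate a node, then check for
// failure before initializing it. This assumes these helpers may grow the
// subgraph's internal arrays, so node/value pointers obtained before a call
// should be treated as invalidated:
//
//   struct xnn_node* node = xnn_subgraph_new_node(subgraph);
//   if (node == NULL) {
//     return xnn_status_out_of_memory;
//   }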

// Get the size of the tensor in bytes, based on its dimensions.
size_t xnn_tensor_get_size(const struct xnn_value* value);

size_t xnn_tensor_get_size_by_id(xnn_subgraph_t subgraph, uint32_t value_id);

// Checks if a tensor shape is completely known.
bool xnn_tensor_shape_is_static(const struct xnn_value* value);
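
// Worked example: a static fp32 tensor of shape {2, 3, 4} holds
// 2 * 3 * 4 = 24 elements, so xnn_tensor_get_size returns
// 24 * sizeof(float) = 96 bytes. If any dimension is still unknown,
// xnn_tensor_shape_is_static returns false and the byte size is not yet
// meaningful.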

XNN_INLINE static size_t xnn_get_rounded_size(size_t size)
{}

// Returns the size of the tensor in bytes, padded with extra bytes and rounded up to the allocation alignment.
XNN_INLINE static size_t xnn_tensor_get_rounded_size(const struct xnn_value* value)
{}
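
// A minimal sketch of the rounding, assuming the XNN_EXTRA_BYTES and
// XNN_ALLOCATION_ALIGNMENT constants from xnnpack/common.h and round_up_po2
// from xnnpack/math.h (both included above). The padding leaves headroom for
// SIMD kernels that read past the end of a tensor:
//
//   XNN_INLINE static size_t xnn_get_rounded_size(size_t size)
//   {
//     return round_up_po2(size + XNN_EXTRA_BYTES, XNN_ALLOCATION_ALIGNMENT);
//   }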

// Product of all shape dimensions
size_t xnn_shape_multiply_all_dims(
  const struct xnn_shape shape[1]);

// Product of all shape dimensions, except for the last num_nonbatch_dims dimensions
size_t xnn_shape_multiply_batch_dims(
  const struct xnn_shape shape[1], size_t num_nonbatch_dims);

// Product of all shape dimensions, except for the last (channel) one
size_t xnn_shape_multiply_non_channel_dims(
  const struct xnn_shape shape[1]);

// Product of n leading dimensions.
size_t xnn_shape_multiply_leading_dims(
  const struct xnn_shape shape[1],
  size_t num_leading_dims);

// Product of trailing dimensions starting from start_dim.
size_t xnn_shape_multiply_trailing_dims(
  const struct xnn_shape shape[1],
  size_t start_dim);
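
// Worked example for a shape of {2, 3, 4, 5} (e.g. NHWC with C = 5):
//   xnn_shape_multiply_all_dims            -> 2 * 3 * 4 * 5 = 120
//   xnn_shape_multiply_batch_dims(., 1)    -> 2 * 3 * 4     = 24
//   xnn_shape_multiply_non_channel_dims    -> 2 * 3 * 4     = 24
//   xnn_shape_multiply_leading_dims(., 2)  -> 2 * 3         = 6
//   xnn_shape_multiply_trailing_dims(., 2) -> 4 * 5         = 20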

// Get the size in bytes needed to hold the dynamic quantization parameters.
size_t xnn_tensor_get_dynamic_quant_param_size(const struct xnn_value* value);

XNN_INLINE static size_t xnn_tensor_get_rounded_dynamic_quant_param_size(const struct xnn_value* value) {}


enum xnn_status xnn_subgraph_optimize(xnn_subgraph_t subgraph, uint32_t flags);

void xnn_subgraph_rewrite_for_nchw(xnn_subgraph_t subgraph);

// Rewrites the subgraph for FP16; returns true on success, false if the rewrite failed.
bool xnn_subgraph_rewrite_for_fp16(xnn_subgraph_t subgraph);
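
// Sketch of how these passes are typically driven (hedged; the exact
// sequencing inside XNNPACK may differ): xnn_subgraph_optimize runs before a
// runtime is created, and fusion can be suppressed by passing the
// XNN_FLAG_NO_OPERATOR_FUSION flag defined above:
//
//   enum xnn_status status = xnn_subgraph_optimize(subgraph, /*flags=*/0);
//   if (status != xnn_status_success) {
//     return status;
//   }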

void xnn_node_clear(struct xnn_node* node);
void xnn_value_clear(struct xnn_value* value);

void xnn_value_copy(struct xnn_value* dst_value, const struct xnn_value* src_value);

void xnn_init_convert_node(
  struct xnn_node* node,
  enum xnn_compute_type compute_type,
  uint32_t input_id,
  uint32_t output_id,
  uint32_t flags);
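
// Sketch of a rewrite pass repurposing a node as a Convert (hedged; the
// compute-type enumerators are elided above, so `compute_type` is a
// placeholder):
//
//   xnn_node_clear(node);
//   xnn_init_convert_node(node, compute_type, input_id, output_id, /*flags=*/0);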

struct xnn_workspace {};

void xnn_subgraph_analyze_consumers_and_producers(xnn_subgraph_t subgraph);

enum xnn_status resize_fully_connected_output_tensor(
  const struct xnn_operator_data* opdata,
  struct xnn_value* values,
  size_t num_values,
  size_t old_workspace_size,
  pthreadpool_t threadpool);

#ifdef __cplusplus
}  // extern "C"
#endif