/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023 Isovalent */
#ifndef __BPF_MPROG_H
#define __BPF_MPROG_H

#include <linux/bpf.h>

/* bpf_mprog framework:
 *
 * bpf_mprog is a generic layer for multi-program attachment. In-kernel users
 * of bpf_mprog do not need to care about the dependency resolution
 * internals; they can just consume it with a few API calls. The currently
 * available dependency directives are BPF_F_{BEFORE,AFTER}, which enable
 * insertion of a BPF program or BPF link relative to an existing BPF program
 * or BPF link inside the multi-program array, as well as prepend and append
 * behavior if no relative object was specified. See the corresponding
 * selftests for concrete examples (e.g. the tc_links and tc_opts test cases
 * of test_progs).
 *
 * Usage of the bpf_mprog_{attach,detach,query}() core APIs with pseudo code
 * (more concrete sketches for a hypothetical consumer follow below this
 * comment):
 *
 *  Attach case:
 *
 *  struct bpf_mprog_entry *entry, *entry_new;
 *  int ret;
 *
 *  // bpf_mprog user-side lock
 *  // fetch active @entry from attach location
 *  [...]
 *  ret = bpf_mprog_attach(entry, &entry_new, [...]);
 *  if (!ret) {
 *      if (entry != entry_new) {
 *          // swap @entry to @entry_new at attach location
 *          // ensure there are no inflight users of @entry:
 *          synchronize_rcu();
 *      }
 *      bpf_mprog_commit(entry);
 *  } else {
 *      // error path, bail out, propagate @ret
 *  }
 *  // bpf_mprog user-side unlock
 *
 *  Detach case:
 *
 *  struct bpf_mprog_entry *entry, *entry_new;
 *  int ret;
 *
 *  // bpf_mprog user-side lock
 *  // fetch active @entry from attach location
 *  [...]
 *  ret = bpf_mprog_detach(entry, &entry_new, [...]);
 *  if (!ret) {
 *      // all lines marked with (*) are optional and depend on whether
 *      // the use-case needs the bpf_mprog_bundle to be freed or not
 *      if (!bpf_mprog_total(entry_new))    (*)
 *          entry_new = NULL                (*)
 *      // swap @entry to @entry_new at attach location
 *      // ensure there are no inflight users of @entry:
 *      synchronize_rcu();
 *      bpf_mprog_commit(entry);
 *      if (!entry_new)                     (*)
 *          // free bpf_mprog_bundle        (*)
 *  } else {
 *      // error path, bail out, propagate @ret
 *  }
 *  // bpf_mprog user-side unlock
 *
 *  Query case:
 *
 *  struct bpf_mprog_entry *entry;
 *  int ret;
 *
 *  // bpf_mprog user-side lock
 *  // fetch active @entry from attach location
 *  [...]
 *  ret = bpf_mprog_query(attr, uattr, entry);
 *  // bpf_mprog user-side unlock
 *
 *  Data/fast path:
 *
 *  struct bpf_mprog_entry *entry;
 *  struct bpf_mprog_fp *fp;
 *  struct bpf_prog *prog;
 *  int ret = [...];
 *
 *  rcu_read_lock();
 *  // fetch active @entry from attach location
 *  [...]
 *  bpf_mprog_foreach_prog(entry, fp, prog) {
 *      ret = bpf_prog_run(prog, [...]);
 *      // process @ret from program
 *  }
 *  [...]
 *  rcu_read_unlock();
 *
 * bpf_mprog locking considerations:
 *
 * bpf_mprog_{attach,detach,query}() must be protected by an external lock
 * (like RTNL in case of tcx).
 *
 * The bpf_mprog_entry pointer can be an __rcu-annotated pointer (in case of
 * tcx the netdevice has tcx_ingress and tcx_egress __rcu pointers) which
 * gets updated via rcu_assign_pointer(), pointing to the active
 * bpf_mprog_entry of the bpf_mprog_bundle.
 *
 * The fast path accesses the active bpf_mprog_entry within an RCU critical
 * section (in case of tcx it runs in NAPI, which provides RCU protection
 * there; other users might need an explicit rcu_read_lock()).
 * bpf_mprog_commit() assumes that there are no inflight users of the old
 * bpf_mprog_entry anymore.
 *
 * The READ_ONCE()/WRITE_ONCE() pairing for the bpf_mprog_fp's prog access is
 * for the replacement case where we don't swap the bpf_mprog_entry.
 */
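/* Illustrative sketch of the attach path for a hypothetical consumer,
 * filling in the pseudo code above. All names around the bpf_mprog calls
 * (struct mydev, mydev_lock, dev->mprog) are assumptions made up for this
 * example and are not part of the bpf_mprog API; it also assumes the
 * bpf_mprog_bundle behind the dev->mprog __rcu pointer was already
 * allocated at the attach location:
 *
 *  static DEFINE_MUTEX(mydev_lock);
 *
 *  int mydev_prog_attach(struct mydev *dev, struct bpf_prog *prog,
 *                        u32 flags, u32 id_or_fd, u64 revision)
 *  {
 *      struct bpf_mprog_entry *entry, *entry_new;
 *      int ret;
 *
 *      mutex_lock(&mydev_lock);
 *      entry = rcu_dereference_protected(dev->mprog,
 *                                        lockdep_is_held(&mydev_lock));
 *      ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, NULL,
 *                             flags, id_or_fd, revision);
 *      if (!ret) {
 *          if (entry != entry_new) {
 *              // swap to the peer entry at the attach location
 *              rcu_assign_pointer(dev->mprog, entry_new);
 *              // ensure there are no inflight users of @entry:
 *              synchronize_rcu();
 *          }
 *          bpf_mprog_commit(entry);
 *      }
 *      mutex_unlock(&mydev_lock);
 *      return ret;
 *  }
 */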
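/* A matching sketch for the detach path, including the optional bundle
 * teardown marked with (*) in the pseudo code above; mydev_bundle_free()
 * is likewise a hypothetical helper of the consumer, not something
 * provided by bpf_mprog:
 *
 *  int mydev_prog_detach(struct mydev *dev, struct bpf_prog *prog,
 *                        u32 flags, u32 id_or_fd, u64 revision)
 *  {
 *      struct bpf_mprog_entry *entry, *entry_new;
 *      int ret;
 *
 *      mutex_lock(&mydev_lock);
 *      entry = rcu_dereference_protected(dev->mprog,
 *                                        lockdep_is_held(&mydev_lock));
 *      ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, flags,
 *                             id_or_fd, revision);
 *      if (!ret) {
 *          // optional: drop the bundle once the last program is gone
 *          if (!bpf_mprog_total(entry_new))
 *              entry_new = NULL;
 *          rcu_assign_pointer(dev->mprog, entry_new);
 *          // ensure there are no inflight users of @entry:
 *          synchronize_rcu();
 *          bpf_mprog_commit(entry);
 *          if (!entry_new)
 *              mydev_bundle_free(dev);
 *      }
 *      mutex_unlock(&mydev_lock);
 *      return ret;
 *  }
 */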
#define bpf_mprog_foreach_tuple(entry, fp, cp, t)	…

#define bpf_mprog_foreach_prog(entry, fp, p)	…

#define BPF_MPROG_MAX	…

struct bpf_mprog_fp {
	…
};

struct bpf_mprog_cp {
	…
};

struct bpf_mprog_entry {
	…
};

struct bpf_mprog_bundle {
	…
};

struct bpf_tuple {
	…
};

static inline struct bpf_mprog_entry *
bpf_mprog_peer(const struct bpf_mprog_entry *entry)
{
	…
}

static inline void bpf_mprog_bundle_init(struct bpf_mprog_bundle *bundle)
{
	…
}

static inline void bpf_mprog_inc(struct bpf_mprog_entry *entry)
{
	…
}

static inline void bpf_mprog_dec(struct bpf_mprog_entry *entry)
{
	…
}

static inline int bpf_mprog_max(void)
{
	…
}

static inline int bpf_mprog_total(struct bpf_mprog_entry *entry)
{
	…
}

static inline bool bpf_mprog_exists(struct bpf_mprog_entry *entry,
				    struct bpf_prog *prog)
{
	…
}

static inline void bpf_mprog_mark_for_release(struct bpf_mprog_entry *entry,
					      struct bpf_tuple *tuple)
{
	…
}

static inline void bpf_mprog_complete_release(struct bpf_mprog_entry *entry)
{
	…
}

static inline void bpf_mprog_revision_new(struct bpf_mprog_entry *entry)
{
	…
}

static inline void bpf_mprog_commit(struct bpf_mprog_entry *entry)
{
	…
}

static inline u64 bpf_mprog_revision(struct bpf_mprog_entry *entry)
{
	…
}

static inline void bpf_mprog_entry_copy(struct bpf_mprog_entry *dst,
					struct bpf_mprog_entry *src)
{
	…
}

static inline void bpf_mprog_entry_clear(struct bpf_mprog_entry *dst)
{
	…
}

static inline void bpf_mprog_clear_all(struct bpf_mprog_entry *entry,
				       struct bpf_mprog_entry **entry_new)
{
	…
}

static inline void bpf_mprog_entry_grow(struct bpf_mprog_entry *entry, int idx)
{
	…
}

static inline void bpf_mprog_entry_shrink(struct bpf_mprog_entry *entry, int idx)
{
	…
}

static inline void bpf_mprog_read(struct bpf_mprog_entry *entry, u32 idx,
				  struct bpf_mprog_fp **fp,
				  struct bpf_mprog_cp **cp)
{
	…
}

static inline void bpf_mprog_write(struct bpf_mprog_fp *fp,
				   struct bpf_mprog_cp *cp,
				   struct bpf_tuple *tuple)
{
	…
}

int bpf_mprog_attach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog_new, struct bpf_link *link,
		     struct bpf_prog *prog_old,
		     u32 flags, u32 id_or_fd, u64 revision);

int bpf_mprog_detach(struct bpf_mprog_entry *entry,
		     struct bpf_mprog_entry **entry_new,
		     struct bpf_prog *prog, struct bpf_link *link,
		     u32 flags, u32 id_or_fd, u64 revision);

int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
		    struct bpf_mprog_entry *entry);

static inline bool bpf_mprog_supported(enum bpf_prog_type type)
{
	…
}

#endif /* __BPF_MPROG_H */