/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <[email protected]>
 */

#ifndef __LINUX_DMA_FENCE_UNWRAP_H
#define __LINUX_DMA_FENCE_UNWRAP_H

struct dma_fence;

/**
 * struct dma_fence_unwrap - cursor into the container structure
 *
 * Should be used with the dma_fence_unwrap_for_each() iterator macro.
 */
struct dma_fence_unwrap {
	…
};

struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
					 struct dma_fence_unwrap *cursor);
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor);

/**
 * dma_fence_unwrap_for_each - iterate over all fences in containers
 * @fence: current fence
 * @cursor: current position inside the containers
 * @head: starting point for the iterator
 *
 * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
 * potential fences in them. If @head is just a normal fence, only that one is
 * returned.
 */
#define dma_fence_unwrap_for_each(fence, cursor, head) …

struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,
					   struct dma_fence_unwrap *cursors);

/**
 * dma_fence_unwrap_merge - unwrap and merge fences
 *
 * All fences given as parameters are unwrapped and merged back together as a
 * flat dma_fence_array. Useful if multiple containers need to be merged
 * together.
 *
 * Implemented as a macro to allocate the necessary arrays on the stack and
 * account the stack frame size to the caller.
 *
 * Returns NULL on memory allocation failure, a dma_fence object representing
 * all the given fences otherwise.
 */
#define dma_fence_unwrap_merge(...) …

#endif
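
/*
 * Example usage: a minimal sketch, not taken from the upstream sources; the
 * variables head, fence1 and fence2 below are assumed to exist in the caller.
 *
 *	struct dma_fence_unwrap cursor;
 *	struct dma_fence *fence, *merged;
 *
 *	Walk every leaf fence hidden inside the (potential) container head:
 *
 *	dma_fence_unwrap_for_each(fence, &cursor, head)
 *		pr_info("fence context %llu\n", fence->context);
 *
 *	Merge two (possibly container) fences into one flat dma_fence_array:
 *
 *	merged = dma_fence_unwrap_merge(fence1, fence2);
 *	if (!merged)
 *		return -ENOMEM;
 */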