// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#include "xe_mmio.h"

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/minmax.h>
#include <linux/pci.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "regs/xe_bars.h"
#include "regs/xe_regs.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_macros.h"
#include "xe_sriov.h"
#include "xe_trace.h"

static void tiles_fini(void *arg)
{
	…
}

/*
 * On multi-tile devices, partition the BAR space for MMIO on each tile,
 * possibly accounting for register override on the number of tiles available.
 * Resulting memory layout is like below:
 *
 * .----------------------. <- tile_count * tile_mmio_size
 * |         ....         |
 * |----------------------| <- 2 * tile_mmio_size
 * |   tile1->mmio.regs   |
 * |----------------------| <- 1 * tile_mmio_size
 * |   tile0->mmio.regs   |
 * '----------------------' <- 0MB
 */
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
	…
}

/*
 * On top of all the multi-tile MMIO space there can be a platform-dependent
 * extension for each tile, resulting in a layout like below:
 *
 * .----------------------. <- ext_base + tile_count * tile_mmio_ext_size
 * |         ....         |
 * |----------------------| <- ext_base + 2 * tile_mmio_ext_size
 * | tile1->mmio_ext.regs |
 * |----------------------| <- ext_base + 1 * tile_mmio_ext_size
 * | tile0->mmio_ext.regs |
 * |======================| <- ext_base = tile_count * tile_mmio_size
 * |                      |
 * |       mmio.regs      |
 * |                      |
 * '----------------------' <- 0MB
 *
 * Set up the tile[]->mmio_ext pointers/sizes.
 */
static void mmio_extension_setup(struct xe_device *xe, size_t tile_mmio_size,
				 size_t tile_mmio_ext_size)
{
	…
}
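/*
 * Illustrative sketch (hypothetical helper, not part of this file): one way
 * the per-tile register windows in the first layout above could be derived,
 * assuming xe->mmio.regs maps the start of the register BAR and that each
 * tile carries a matching mmio.regs/mmio.size pair. The helper name and the
 * exact struct members used here are assumptions for the example only.
 */
static void __maybe_unused example_partition_tile_mmio(struct xe_device *xe,
							size_t tile_mmio_size)
{
	struct xe_tile *tile;
	u8 id;

	/* Tile N's registers start N * tile_mmio_size into the BAR mapping. */
	for_each_tile(tile, xe, id) {
		tile->mmio.size = tile_mmio_size;
		tile->mmio.regs = xe->mmio.regs + (size_t)id * tile_mmio_size;
	}
}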
int xe_mmio_probe_tiles(struct xe_device *xe)
{
	…
}

static void mmio_fini(void *arg)
{
	…
}

int xe_mmio_init(struct xe_device *xe)
{
	…
}

static void mmio_flush_pending_writes(struct xe_gt *gt)
{
	…
}

u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
	…
}

u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
{
	…
}

void xe_mmio_write32(struct xe_gt *gt, struct xe_reg reg, u32 val)
{
	…
}

u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
{
	…
}

u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr, u32 set)
{
	…
}

int xe_mmio_write32_and_verify(struct xe_gt *gt,
			       struct xe_reg reg, u32 val, u32 mask, u32 eval)
{
	…
}

bool xe_mmio_in_range(const struct xe_gt *gt,
		      const struct xe_mmio_range *range,
		      struct xe_reg reg)
{
	…
}

/**
 * xe_mmio_read64_2x32() - Read a 64-bit register as two 32-bit reads
 * @gt: MMIO target GT
 * @reg: register to read value from
 *
 * Although Intel GPUs have some 64-bit registers, the hardware officially
 * only supports GTTMMADR register reads of 32 bits or smaller. Even though
 * a readq operation may return a reasonable value, that violation of the
 * spec shouldn't be relied upon and all 64-bit register reads should be
 * performed as two 32-bit reads of the upper and lower dwords.
 *
 * When reading registers that may be changing (such as counters), a rollover
 * of the lower dword between the two 32-bit reads can be problematic. This
 * function attempts to ensure the upper dword has stabilized before returning
 * the 64-bit value.
 *
 * Note that because this function may re-read the register multiple times
 * while waiting for the value to stabilize it should not be used to read
 * any registers where read operations have side effects.
 *
 * Returns the value of the 64-bit register.
 */
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
{
	…
}

static int __xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val,
			    u32 timeout_us, u32 *out_val, bool atomic, bool expect_match)
{
	…
}

/**
 * xe_mmio_wait32() - Wait for a register to match the desired masked value
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: desired value after applying the mask
 * @timeout_us: time out after this period of time. Wait logic tries to be
 * smart, applying an exponential backoff until @timeout_us is reached.
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function polls for the desired masked value and returns zero on success
 * or -ETIMEDOUT if timed out.
 *
 * Note that @timeout_us represents the minimum amount of time to wait before
 * giving up. The actual time taken by this function can be a little more than
 * @timeout_us for different reasons, especially in non-atomic contexts. Thus,
 * it is possible that this function succeeds even after @timeout_us has passed.
 */
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic)
{
	…
}

/**
 * xe_mmio_wait32_not() - Wait for a register to return anything other than the given masked value
 * @gt: MMIO target GT
 * @reg: register to read value from
 * @mask: mask to be applied to the value read from the register
 * @val: value not to be matched after applying the mask
 * @timeout_us: time out after this period of time
 * @out_val: if not NULL, points where to store the last unmasked value
 * @atomic: needs to be true if calling from an atomic context
 *
 * This function works exactly like xe_mmio_wait32() with the exception that
 * @val is expected not to be matched.
 */
int xe_mmio_wait32_not(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		       u32 *out_val, bool atomic)
{
	…
}
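/*
 * Illustrative sketch (hypothetical helper, not part of this file): one way
 * to implement the 2x32 read with the rollover guard described in the
 * xe_mmio_read64_2x32() documentation above. The retry count and the way the
 * upper-dword register is derived from @reg are assumptions for the example.
 */
static u64 __maybe_unused example_read64_2x32(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_reg reg_udw = { .addr = reg.addr + 0x4 };
	u32 ldw, udw, oldudw, retries;

	/* Sample the upper dword first so a change can be detected below. */
	oldudw = xe_mmio_read32(gt, reg_udw);

	for (retries = 5; retries; --retries) {
		ldw = xe_mmio_read32(gt, reg);
		udw = xe_mmio_read32(gt, reg_udw);

		/* Upper dword unchanged: the lower dword did not roll over. */
		if (udw == oldudw)
			break;

		oldudw = udw;
	}

	return (u64)udw << 32 | ldw;
}

/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * general shape of the exponential-backoff poll that the xe_mmio_wait32()
 * documentation above describes, ignoring the @atomic and @out_val handling
 * of the real helpers. The 50us backoff cap is an assumption for the example.
 */
static int __maybe_unused example_wait32(struct xe_gt *gt, struct xe_reg reg,
					 u32 mask, u32 val, u32 timeout_us)
{
	u32 wait_us = 1;

	for (;;) {
		if ((xe_mmio_read32(gt, reg) & mask) == val)
			return 0;

		if (!timeout_us)
			return -ETIMEDOUT;

		/* Never sleep past the remaining budget or the backoff cap. */
		wait_us = min3(wait_us, timeout_us, 50u);
		usleep_range(wait_us, wait_us << 1);
		timeout_us -= wait_us;

		/* Exponential backoff: double the sleep for the next pass. */
		wait_us <<= 1;
	}
}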