linux/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{}

/**
 * svm_migrate_copy_memory_gart - use SDMA to copy data between RAM and VRAM
 *
 * @adev: amdgpu device the SDMA ring runs on
 * @sys: system memory DMA addresses of the pages to copy
 * @vram: VRAM DMA addresses of the pages to copy
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, SDMA fence to signal after the SDMA copy is done
 *
 * The RAM address uses contiguous GART table entries that map to the RAM
 * pages. The VRAM address uses a direct mapping of the VRAM pages, which must
 * be npages contiguous pages.
 * The GART update and the SDMA copy use the same buffer-copy function ring,
 * so the copy is split into transfers of at most GTT_MAX_PAGES pages each and
 * all SDMA operations are serialized. The fence of the last SDMA transfer is
 * returned so the caller can wait for the whole copy to finish.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{}
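
/*
 * To make the splitting described above concrete: a simplified, illustrative
 * sketch, not the driver's actual implementation. Each iteration copies at
 * most GTT_MAX_PAGES pages, and only the fence of the newest chunk is kept,
 * because the SDMA ring serializes all operations. do_one_sdma_chunk() is a
 * hypothetical helper standing in for the GART mapping plus the buffer-copy
 * submission; the caller must pass *mfence initialized to NULL.
 */
static int do_one_sdma_chunk(uint64_t size, struct dma_fence **next);

static int __maybe_unused
svm_migrate_copy_chunked_sketch(uint64_t npages, struct dma_fence **mfence)
{
	struct dma_fence *next = NULL;
	uint64_t size;
	int r = 0;

	while (npages) {
		size = min(npages, (uint64_t)GTT_MAX_PAGES);

		r = do_one_sdma_chunk(size, &next);	/* hypothetical */
		if (r)
			break;

		/* drop the previous chunk's fence, keep only the newest */
		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
	}
	return r;
}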

/**
 * svm_migrate_copy_done - wait for the SDMA memory copy to finish
 *
 * @adev: amdgpu device the SDMA memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the DMA fence to be signaled. If the copy was split into multiple
 * SDMA operations, this is the fence of the last SDMA operation.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0		- success
 * otherwise	- error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{}
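
/*
 * Illustrative sketch of the wait (error reporting trimmed): block
 * uninterruptibly on the last SDMA fence, then drop the reference that was
 * handed back by the copy function.
 */
static int __maybe_unused
svm_migrate_copy_done_sketch(struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);	/* false: uninterruptible */
		dma_fence_put(mfence);
	}
	return r;	/* 0 on success, negative error code otherwise */
}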

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{}

static void svm_migrate_put_sys_page(unsigned long addr)
{}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{}

static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch, uint64_t ttm_res_offset)
{}

static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system memory to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start_mgr: start page to migrate
 * @last_mgr: last page to migrate
 * @mm: the process mm structure
 * @trigger: reason for the migration
 *
 * Context: Process context, caller holds the mmap read lock, svms lock and
 *          prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start_mgr, unsigned long last_mgr,
			struct mm_struct *mm, uint32_t trigger)
{}
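
/*
 * Sketch of the per-VMA walk behind svm_migrate_ram_to_vram() (simplified,
 * not the exact driver code): a byte range [start, end) may span several
 * VMAs, so each intersection is migrated separately and the TTM resource
 * offset advances by the size of each piece. The caller is assumed to hold
 * the mmap read lock, as the kernel-doc above requires.
 */
static int __maybe_unused
svm_migrate_walk_vmas_sketch(struct kfd_node *node, struct svm_range *prange,
			     struct mm_struct *mm, unsigned long start,
			     unsigned long end, uint32_t trigger)
{
	uint64_t ttm_res_offset = 0;
	struct vm_area_struct *vma;
	unsigned long addr, next;
	long r;

	for (addr = start; addr < end; addr = next) {
		vma = vma_lookup(mm, addr);
		if (!vma)
			break;			/* unmapped hole, stop */

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(node, prange, vma, addr, next,
					    trigger, ttm_res_offset);
		if (r < 0)
			return (int)r;

		ttm_res_offset += next - addr;
	}
	return 0;
}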

static void svm_migrate_page_free(struct page *page)
{}

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @node: kfd node the range is migrated from
 * @prange: svm range structure
 * @vma: vm_area_struct that the range [start, end] belongs to
 * @start: range start virtual address in pages
 * @end: range end virtual address in pages
 * @trigger: reason for the migration
 * @fault_page: page from vmf->page when called from the svm_migrate_to_ram()
 *              CPU page fault handler, NULL otherwise
 *
 * Context: Process context, caller holds the mmap read lock and
 *          prange->migrate_mutex
 *
 * Return:
 *   negative values - indicate error
 *   positive values or zero - number of pages migrated
 */
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{}
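
/*
 * Sketch of the standard migrate_vma three-step flow the function above is
 * built on (simplified; field usage per <linux/migrate.h>, and the
 * .fault_page member requires a kernel that has it).
 * MIGRATE_VMA_SELECT_DEVICE_PRIVATE collects only device-private pages whose
 * pgmap owner matches .pgmap_owner.
 */
static long __maybe_unused
svm_migrate_vma_flow_sketch(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, void *owner,
			    struct page *fault_page)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma migrate = {
		.vma		= vma,
		.start		= start,
		.end		= end,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.pgmap_owner	= owner,
		.fault_page	= fault_page,
	};
	long r;

	/* one array for src PFNs, one for dst PFNs */
	migrate.src = kvcalloc(npages * 2, sizeof(*migrate.src), GFP_KERNEL);
	if (!migrate.src)
		return -ENOMEM;
	migrate.dst = migrate.src + npages;

	r = migrate_vma_setup(&migrate);	/* collect and unmap src pages */
	if (r)
		goto out;

	/* ... allocate system pages into migrate.dst[], SDMA-copy data ... */

	migrate_vma_pages(&migrate);		/* install the dst pages */
	migrate_vma_finalize(&migrate);		/* restore CPU page tables */
	r = migrate.cpages;			/* pages actually collected */
out:
	kvfree(migrate.src);
	return r;
}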

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system memory
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @start_mgr: first page that needs to be migrated to system RAM
 * @last_mgr: last page that needs to be migrated to system RAM
 * @trigger: reason for the migration
 * @fault_page: page from vmf->page when called from the svm_migrate_to_ram()
 *              CPU page fault handler, NULL otherwise
 *
 * Context: Process context, caller holds the mmap read lock and
 *          prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    unsigned long start_mgr, unsigned long last_mgr,
			    uint32_t trigger, struct page *fault_page)
{}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start: first page of the range to migrate
 * @last: last page of the range to migrate
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason for the migration
 *
 * Context: Process context, caller holds the mmap read lock, svms lock and
 *          prange lock
 *
 * Migrate all VRAM pages in prange to system RAM first, then migrate the
 * [start, last] pages from system RAM to the GPU node best_loc.
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start, unsigned long last,
			struct mm_struct *mm, uint32_t trigger)
{}
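
/*
 * Illustrative sketch of the two-step device-to-device path described in the
 * kernel-doc above: there is no direct VRAM-to-VRAM copy here, so the range
 * is first fully evicted to system RAM and then migrated to the new GPU
 * node. The retry bound and the -EDEADLK fallback are illustrative, and the
 * svm_range field names follow kfd_svm.h at the time of writing.
 */
static int __maybe_unused
svm_migrate_vram_to_vram_sketch(struct svm_range *prange, uint32_t best_loc,
				unsigned long start, unsigned long last,
				struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/* step 1: evict all VRAM pages of the whole range to system RAM */
	do {
		r = svm_migrate_vram_to_ram(prange, mm, prange->start,
					    prange->last, trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;	/* eviction did not fully succeed */

	/* step 2: migrate the requested pages from system RAM to best_loc */
	return svm_migrate_ram_to_vram(prange, best_loc, start, last,
				       mm, trigger);
}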

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    unsigned long start, unsigned long last,
		    struct mm_struct *mm, uint32_t trigger)
{}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU VM fault information: vma and address
 *
 * Context: VM fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - deliver a SIGBUS page fault to the application
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{}
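
/*
 * Sketch of the fault-handler contract (simplified, locking omitted): the
 * CPU cannot map device-private pages, so a fault on one must either migrate
 * the range back to system RAM or answer with SIGBUS. lookup_prange() is a
 * hypothetical stand-in for the range lookup done by the real handler, and
 * the trigger value assumes the driver's KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU
 * enumerator.
 */
static struct svm_range *lookup_prange(struct mm_struct *mm, unsigned long pfn);

static vm_fault_t __maybe_unused
svm_migrate_to_ram_sketch(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address >> PAGE_SHIFT;	/* page number */
	struct svm_range *prange;
	int r;

	prange = lookup_prange(vmf->vma->vm_mm, addr);	/* hypothetical */
	if (!prange)
		return VM_FAULT_SIGBUS;

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, addr, addr,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
				    vmf->page);
	if (r)
		return VM_FAULT_SIGBUS;	/* the application receives SIGBUS */

	return 0;
}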

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{}
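
/*
 * Sketch of the zone-device registration performed by
 * kgd2kfd_init_zone_device() (simplified): the VRAM aperture is registered
 * as device-private memory so that struct pages back the VRAM and CPU faults
 * on those pages are routed to svm_migrate_to_ram() via the pgmap ops above.
 * Field and helper usage per <linux/memremap.h>; the owner assignment is an
 * assumption, the driver may use a dedicated owner token.
 */
static int __maybe_unused
svm_init_zone_device_sketch(struct amdgpu_device *adev,
			    struct dev_pagemap *pgmap,
			    u64 vram_base, u64 vram_size)
{
	void *p;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->range.start = vram_base;
	pgmap->range.end = vram_base + vram_size - 1;
	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = adev;	/* must match migrate_vma.pgmap_owner later */
	pgmap->flags = 0;

	p = devm_memremap_pages(adev->dev, pgmap);
	return IS_ERR(p) ? PTR_ERR(p) : 0;
}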