/* linux/drivers/gpu/drm/i915/gvt/sched_policy.c */

/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <[email protected]>
 *
 * Contributors:
 *    Min He <[email protected]>
 *    Bing Niu <[email protected]>
 *    Zhi Wang <[email protected]>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

/* Check whether any engine of this vGPU still has a queued workload. */
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{}

/* We give a vGPU 2 seconds of higher priority during start */
#define GVT_SCHED_VGPU_PRI_TIME  2
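
/*
 * Hedged sketch (not part of the original file, helper name hypothetical):
 * one way the priority window above can be turned into an absolute
 * deadline, after which a freshly started vGPU falls back to its normal
 * priority.
 */
static inline ktime_t sketch_vgpu_pri_deadline(ktime_t start_time)
{
	return ktime_add_ns(start_time,
			    GVT_SCHED_VGPU_PRI_TIME * NSEC_PER_SEC);
}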

/* Per-vGPU bookkeeping for the time-based scheduler. */
struct vgpu_sched_data {};

/* Global state of the time-based scheduler. */
struct gvt_sched_data {};

/* Charge the time the vGPU has just run against its remaining timeslice. */
static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{}
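
/*
 * Hedged sketch (illustrative only, names hypothetical): the accounting a
 * timeslice update performs -- charge the interval since the vGPU was
 * scheduled in against its remaining timeslice budget.
 */
static inline ktime_t sketch_charge_timeslice(ktime_t sched_in_time,
					      ktime_t cur_time,
					      ktime_t left_ts)
{
	ktime_t delta = ktime_sub(cur_time, sched_in_time);

	return ktime_sub(left_ts, delta);
}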

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

/* Periodically rebalance timeslices across vGPUs according to their weights. */
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{}
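
/*
 * Hedged sketch (hypothetical helper): a weight-proportional share of the
 * GVT_TS_BALANCE_PERIOD_MS window, the kind of figure a rebalance pass
 * hands out to each vGPU.
 */
static inline ktime_t sketch_fair_timeslice(unsigned int weight,
					    unsigned int total_weight)
{
	ktime_t period = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS);

	if (!total_weight)
		return period;

	return ktime_divns(period, total_weight) * weight;
}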

/* If a next vGPU has been picked, hand the hardware over to it. */
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{}

/* Pick the least recently scheduled vGPU that still has pending work. */
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{}
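
/*
 * Hedged sketch (self-contained, types hypothetical): a pick over an LRU
 * run-queue that returns the least recently scheduled entry which still
 * has work queued -- the shape of the search find_busy_vgpu() performs.
 */
struct sketch_runq_entry {
	struct list_head lru_link;
	bool has_work;
};

static inline struct sketch_runq_entry *sketch_pick_busy(struct list_head *runq)
{
	struct sketch_runq_entry *entry;

	list_for_each_entry(entry, runq, lru_link) {
		if (entry->has_work)
			return entry;
	}

	return NULL;
}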

/* in nanoseconds (1 ms) */
#define GVT_DEFAULT_TIME_SLICE 1000000

/* One pass of the time-based scheduler: pick a busy vGPU and schedule it. */
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{}
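
/*
 * Hedged sketch: the round-robin rotation that typically follows a pick --
 * move the chosen entry to the tail of the run-queue so the remaining
 * entries are considered first on the next pass.
 */
static inline void sketch_requeue_picked(struct list_head *lru_link,
					 struct list_head *runq)
{
	list_move_tail(lru_link, runq);
}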

/* Called from the GVT service thread to run a scheduling pass. */
void intel_gvt_schedule(struct intel_gvt *gvt)
{}

/* Periodic hrtimer callback: kick the scheduler and re-arm the timer. */
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{}
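
/*
 * Hedged sketch (name hypothetical): the standard shape of a periodic
 * hrtimer callback -- push the expiry forward by one period and ask the
 * core to restart the timer. The real callback additionally requests a
 * scheduling pass from the GVT service thread.
 */
static inline enum hrtimer_restart sketch_periodic_timer_fn(struct hrtimer *timer)
{
	hrtimer_forward_now(timer, ns_to_ktime(GVT_DEFAULT_TIME_SLICE));

	return HRTIMER_RESTART;
}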

static int tbs_sched_init(struct intel_gvt *gvt)
{}

static void tbs_sched_clean(struct intel_gvt *gvt)
{}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{}

static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{}

static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{}

/* For the per-vGPU scheduler policy there are two pieces of per-vGPU data:
 * sched_data and sched_ctl. Both are treated as part of the global
 * scheduler and are protected by gvt->sched_lock. Callers must decide for
 * themselves whether vgpu_lock needs to be held outside.
 */
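
/*
 * Hedged sketch (helper hypothetical): the locking described above --
 * scheduler data is accessed under gvt->sched_lock; whether vgpu_lock is
 * also held around the call is left to the caller.
 */
static inline void sketch_sched_data_access(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	/* read or update vgpu->sched_data / vgpu->sched_ctl here */
	mutex_unlock(&vgpu->gvt->sched_lock);
}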

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{}

/* Ask the GVT service thread to run the scheduler outside the timer tick. */
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{}

void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{}