/* linux/drivers/target/target_core_device.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <[email protected]>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

/*
 * Resolve the LUN referenced by @se_cmd for I/O dispatch (per the symbol
 * name; the body appears stripped in this view — TODO restore it, as a
 * non-void function with an empty body will not build with
 * -Werror=return-type).
 */
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{}
/* Fix: EXPORT_SYMBOL() requires the exported symbol name as its argument. */
EXPORT_SYMBOL(transport_lookup_cmd_lun);

/*
 * LUN lookup for a task-management request (per the symbol name; body is
 * stripped in this view — TODO restore; empty body of a non-void function
 * will not build with -Werror=return-type).
 */
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{}
/* Fix: EXPORT_SYMBOL() requires the exported symbol name as its argument. */
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * Report whether the LUN backing @cmd is read-only (per the symbol name;
 * body stripped in this view — TODO restore).
 */
bool target_lun_is_rdonly(struct se_cmd *cmd)
{}
/* Fix: EXPORT_SYMBOL() requires the exported symbol name as its argument. */
EXPORT_SYMBOL(target_lun_is_rdonly);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
/*
 * NOTE(review): body is stripped in this view; contract above (increments
 * &deve->pr_kref on rtpi match) cannot be verified here — TODO restore body.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{}

/*
 * Tear down the per-node device/LUN mapping list (per the symbol name).
 * NOTE(review): body stripped in this view — TODO restore.
 */
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{}

/*
 * Update the RO/RW access mode of @nacl's mapping for @mapped_lun
 * (per the symbol name). NOTE(review): body stripped — TODO restore.
 */
void core_update_device_list_access(
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl)
{}

/*
 * Called with rcu_read_lock or nacl->device_list_lock held.
 */
/*
 * Look up @nacl's se_dev_entry for @mapped_lun. Per the comment above,
 * callers must hold rcu_read_lock or nacl->device_list_lock.
 * NOTE(review): body stripped in this view — TODO restore.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u64 mapped_lun)
{}
/* Fix: EXPORT_SYMBOL() requires the exported symbol name as its argument. */
EXPORT_SYMBOL(target_nacl_find_deve);

/*
 * kref release callback for persistent-reservation references (per the
 * signature). NOTE(review): body stripped — TODO restore.
 */
void target_pr_kref_release(struct kref *kref)
{}

/*
 * Establish UA condition on SCSI device - all LUNs
 */
/* NOTE(review): body stripped in this view — TODO restore. */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{}

/*
 * Presumably raises a REPORTED LUNS DATA HAS CHANGED UA across @nacl's
 * dev entries (per the symbol name). NOTE(review): body stripped — TODO
 * restore.
 */
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
			     bool skip_new)
{}

/*
 * Establish @nacl's mapping of @mapped_lun onto @lun within @tpg (per the
 * symbol name); returns 0 on success by kernel convention — TODO confirm.
 * NOTE(review): body stripped in this view — TODO restore.
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u64 mapped_lun,
	bool lun_access_ro,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{}

/*
 * Inverse of core_enable_device_list_for_node() (per the symbol name).
 * NOTE(review): body stripped in this view — TODO restore.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{}

/*      core_clear_lun_from_tpg():
 *
 *
 */
/* NOTE(review): body stripped in this view — TODO restore. */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{}

/*
 * Free the device's cached VPD (Vital Product Data) entries (per the
 * symbol name). NOTE(review): body stripped — TODO restore.
 */
static void se_release_vpd_for_dev(struct se_device *dev)
{}

/*
 * Presumably clamps/aligns @max_sectors for @block_size (per the symbol
 * name). NOTE(review): body stripped; non-void with empty body will not
 * build — TODO restore.
 */
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{}

/*
 * Attach @lun of @dev to @tpg (per the symbol name). NOTE(review): body
 * stripped in this view — TODO restore.
 */
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{}

/*      core_dev_del_lun():
 *
 *
 */
/* NOTE(review): body stripped in this view — TODO restore. */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{}

/*
 * Allocate a LUN ACL for @nacl/@mapped_lun; error code presumably returned
 * through *@ret since the pointer is the primary return — TODO confirm.
 * NOTE(review): body stripped in this view — TODO restore.
 */
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u64 mapped_lun,
	int *ret)
{}

/* NOTE(review): body stripped in this view — TODO restore. */
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	bool lun_access_ro)
{}

/* NOTE(review): body stripped in this view — TODO restore. */
int core_dev_del_initiator_node_lun_acl(
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{}

/* NOTE(review): body stripped in this view — TODO restore. */
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{}

/*
 * Log the device's INQUIRY identification data (per the symbol name).
 * NOTE(review): body stripped in this view — TODO restore.
 */
static void scsi_dump_inquiry(struct se_device *dev)
{}

/*
 * Allocate and minimally initialize an se_device on @hba (per the symbol
 * name). NOTE(review): body stripped; non-void with empty body will not
 * build — TODO restore.
 */
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{}

/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
/*
 * Per the comment above: probe @bdev for discard support and, if present,
 * fill in @attrib's UNMAP parameters; presumably returns true when discard
 * is supported — TODO confirm. NOTE(review): body stripped — TODO restore.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{}
/* Fix: EXPORT_SYMBOL() requires the exported symbol name as its argument. */
EXPORT_SYMBOL(target_configure_unmap_from_queue);

/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
/*
 * Per the comment above: convert @lb from the initiator-advertised block
 * size to 512-byte block-layer sectors. NOTE(review): body stripped in
 * this view — TODO restore.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{}
/* Fix: EXPORT_SYMBOL() requires the exported symbol name as its argument. */
EXPORT_SYMBOL(target_to_linux_sector);

/*
 * Iterator context passed through target_devices_idr_iter().
 * NOTE(review): members stripped in this view — TODO restore.
 */
struct devices_idr_iter {};

/*
 * idr_for_each() callback over devices_idr; the __must_hold annotation
 * documents that device_mutex is held by the caller.
 * NOTE(review): body stripped in this view — TODO restore.
 */
static int target_devices_idr_iter(int id, void *p, void *data)
	 __must_hold(&device_mutex)
{}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
/* NOTE(review): body stripped in this view — TODO restore (kernel-doc
 * above gives the contract: fn returns 0 to continue, non-zero to stop). */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{}

/* NOTE(review): body stripped in this view — TODO restore. */
int target_configure_device(struct se_device *dev)
{}

/* NOTE(review): body stripped in this view — TODO restore. */
void target_free_device(struct se_device *dev)
{}

/*
 * Presumably sets up the file-scope lun0_hba / g_lun0_dev globals declared
 * above (per the symbol name). NOTE(review): body stripped — TODO restore.
 */
int core_dev_setup_virtual_lun0(void)
{}


/*
 * Inverse of core_dev_setup_virtual_lun0() (per the symbol name).
 * NOTE(review): body stripped in this view — TODO restore.
 */
void core_dev_release_virtual_lun0(void)
{}

/*
 * Common CDB parsing for kernel and user passthrough.
 */
/*
 * Per the comment above: common CDB parsing shared by kernel and user
 * passthrough backends, dispatching execution via @exec_cmd.
 * NOTE(review): body stripped in this view — TODO restore.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{}
/* Fix: EXPORT_SYMBOL() requires the exported symbol name as its argument. */
EXPORT_SYMBOL(passthrough_parse_cdb);