linux/drivers/block/aoe/aoecmd.c

/* Copyright (c) 2013 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meaning of max_buffers module parameter */

static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout, "Only aoe_maxout outstanding packets for every MAC on eX.Y.");

/* The number of online cpus during module initialization gives us a
 * convenient heuristic cap on the parallelism used for ktio threads
 * doing I/O completion.  It is not important that the cap equal the
 * actual number of running CPUs at any given time, but because of CPU
 * hotplug, we take care to use ncpus instead of using
 * num_online_cpus() after module initialization.
 */
static int ncpus;
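
/* A hedged sketch of how the snapshot above would be taken (the real
 * assignment belongs in aoecmd_init(); this example_ function is
 * hypothetical and shown only to illustrate the heuristic).
 */
static void
example_snapshot_ncpus(void)
{
	/* Sample the CPU count once at module init.  Later code indexes
	 * the per-thread arrays (iocq, kts, ktiowq) modulo ncpus, so
	 * the value must stay fixed even if CPUs are hotplugged later.
	 */
	ncpus = num_online_cpus();
}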

/* mutex used to serialize ktio thread spawning */
static DEFINE_MUTEX(ktio_spawn_lock);

static wait_queue_head_t *ktiowq;
static struct ktstate *kts;

/* io completion queue */
struct iocq_ktio {
	struct list_head head;
	spinlock_t lock;
};
static struct iocq_ktio *iocq;

static struct page *empty_page;

static struct sk_buff *
new_skb(ulong len)
{}

static struct frame *
getframe_deferred(struct aoedev *d, u32 tag)
{}

static struct frame *
getframe(struct aoedev *d, u32 tag)
{}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{}
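
/* A hedged sketch of a tag layout consistent with the comment above
 * (assumed, not the verbatim kernel code; the lasttag counter is
 * presumed to live in struct aoedev).  The low 16 bits carry the
 * jiffies "xmit tick" for rexmit/rttavg bookkeeping, and the sequence
 * counter is masked to 15 bits before shifting so bit 31 stays clear
 * for userland tags.
 */
static u32
example_newtag(struct aoedev *d)
{
	u32 n;

	n = jiffies & 0xffff;			/* xmit tick in low 16 bits */
	return n | (++d->lasttag & 0x7fff) << 16;	/* top bit left clear */
}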

static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{}

static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{}

static struct aoeif *
ifrotate(struct aoetgt *t)
{}

static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{}

void
aoe_freetframe(struct frame *f)
{}

static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{}

static struct frame *
newframe(struct aoedev *d)
{}

static void
skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
{}

static void
fhash(struct frame *f)
{}

static void
ata_rw_frameinit(struct frame *f)
{}

static int
aoecmd_ata_rw(struct aoedev *d)
{}

/* Some callers cannot sleep; they can still call this function to
 * queue the config packets, transmitting them later, when interrupts
 * are on.
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{}
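
/* A hedged sketch of the deferred-transmit pattern described above
 * (assumed usage; this example_ function is hypothetical): the caller
 * queues the config packets without sleeping, then hands the whole
 * queue to aoenet_xmit() once transmitting is safe.
 */
static void
example_cfg_and_xmit(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);	/* lockless init of on-stack queue */
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);		/* transmit later, interrupts on */
}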

static void
resend(struct aoedev *d, struct frame *f)
{}

static int
tsince_hr(struct frame *f)
{}

static int
tsince(u32 tag)
{}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{}

static struct frame *
reassign_frame(struct frame *f)
{}

static void
probe(struct aoetgt *t)
{}

static long
rto(struct aoedev *d)
{}

static void
rexmit_deferred(struct aoedev *d)
{}

/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
static void
scorn(struct aoetgt *t)
{}
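
/* A hedged sketch of the asymmetric scoring described above (assumed
 * arithmetic, not the verbatim kernel code; the taint field and a
 * TAINT_MAX cap are presumed from aoe.h).  Each failure roughly
 * doubles the demerit score, so the one-step-per-good-probe redemption
 * elsewhere is slow by comparison.
 */
static void
example_scorn(struct aoetgt *t)
{
	int n;

	n = t->taint * 2 + 1;		/* accumulate demerits quickly */
	t->taint = n > TAINT_MAX ? TAINT_MAX : n;
}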

static int
count_targets(struct aoedev *d, int *untainted)
{}

static void
rexmit_timer(struct timer_list *timer)
{}

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{}

static struct buf *
nextbuf(struct aoedev *d)
{}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{}
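
/* A hedged sketch of the locking contract noted above (assumed
 * caller-side pattern; this example_ function is hypothetical):
 * callers take the device lock around aoecmd_work().
 */
static void
example_kick_device(struct aoedev *d)
{
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);
	aoecmd_work(d);		/* runs with d->lock held, as required */
	spin_unlock_irqrestore(&d->lock, flags);
}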

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{}
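
/* A hedged sketch of how work reaches this handler (assumed usage;
 * the work member of struct aoedev is presumed to be wired to
 * aoecmd_sleepwork): atomic-context code schedules the work item, and
 * the workqueue runs it later in process context where sleeping is OK.
 */
static void
example_defer_to_sleepwork(struct aoedev *d)
{
	schedule_work(&d->work);	/* safe to call from atomic context */
}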

static void
ata_ident_fixstring(u16 *id, int ns)
{}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{}

static void
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{}

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{}

static void
bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
{}

void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{}

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{}

static void
ktiocomplete(struct frame *f)
{}

/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(int id)
{}
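
/* A hedged sketch of the consumer side implied by the contract above
 * (assumed and condensed; the ktstate fields fn, id, and lock are
 * presumed from aoe.h): the thread calls the handler with iocq.lock
 * held and waits only when no responses remain.
 */
static void
example_ktio_once(struct ktstate *k)
{
	int more;

	spin_lock_irq(k->lock);
	more = k->fn(k->id);	/* e.g. ktio(); lock held on entry */
	spin_unlock_irq(k->lock);
	if (!more)
		schedule_timeout_interruptible(HZ);	/* placeholder wait */
}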

static int
kthread(void *vp)
{}

void
aoe_ktstop(struct ktstate *k)
{}

int
aoe_ktstart(struct ktstate *k)
{}

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{}
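
/* A hedged sketch of the hand-off described above (assumed shape, not
 * the verbatim kernel code; the r_skb and head members of struct frame
 * are presumed from aoe.h): the response path parks the frame on a
 * per-thread completion queue and wakes the matching ktio thread.
 */
static void
example_ktcomplete(struct frame *f, struct sk_buff *skb)
{
	int id;
	ulong flags;

	f->r_skb = skb;				/* stash the response skb */
	id = f->t->d->aoeminor % ncpus;		/* pick a per-minor queue */
	spin_lock_irqsave(&iocq[id].lock, flags);
	list_add_tail(&f->head, &iocq[id].head);
	spin_unlock_irqrestore(&iocq[id].lock, flags);
	wake_up(&ktiowq[id]);
}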

struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{}

static struct aoetgt **
grow_targets(struct aoedev *d)
{}

static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{}

static void
setdbcnt(struct aoedev *d)
{}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{}

void
aoecmd_cfg_rsp(struct sk_buff *skb)
{}

void
aoecmd_wreset(struct aoetgt *t)
{}

void
aoecmd_cleanslate(struct aoedev *d)
{}

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{}

void
aoe_flush_iocq(void)
{}

void
aoe_flush_iocq_by_index(int id)
{}

int __init
aoecmd_init(void)
{}

void
aoecmd_exit(void)
{}