// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni ([email protected])
 * Copyright (C) 2006,2007 Pete Zaitcev ([email protected])
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

#include <linux/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET   _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32   _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE	PAGE_SIZE
#define CHUNK_ALIGN(x)	(((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
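
/*
 * Illustrative only: with page-sized chunks, a ring offset splits into a
 * chunk index and a byte offset inside that chunk. The helpers further
 * down (mon_copy_to_buff, MON_OFF2HDR) walk a vector of such chunks; this
 * two-line decomposition is the whole trick.
 */
static inline void mon_chunk_of_sketch(unsigned int offset,
    unsigned int *chunk_idx, unsigned int *chunk_off)
{
	*chunk_idx = offset / CHUNK_SIZE;	/* which page of the ring */
	*chunk_off = offset % CHUNK_SIZE;	/* byte within that page */
}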

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 Mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX	CHUNK_ALIGN(1200*1024)
#define BUFF_DFL	CHUNK_ALIGN(300*1024)
#define BUFF_MIN	CHUNK_ALIGN(8*1024)
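
/*
 * The arithmetic behind the comment above, spelled out; the constants are
 * illustrative and not used by the driver. At HZ=100 a jiffy is 10 ms, so
 * a 480 Mbit/s bus moves about 614 KiB per jiffy, and picking data up once
 * in two ticks corresponds to roughly 1.2 MiB of bus traffic, which is the
 * order of magnitude of BUFF_MAX above.
 */
enum {
	MON_SKETCH_BYTES_PER_SEC   = 480u * 1024 * 1024 / 8,		/* 62,914,560 */
	MON_SKETCH_BYTES_PER_JIFFY = MON_SKETCH_BYTES_PER_SEC / 100,	/* ~614 KiB */
	MON_SKETCH_TWO_TICKS       = 2 * MON_SKETCH_BYTES_PER_JIFFY,	/* ~1.2 MiB */
};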

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {};
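
/*
 * For orientation only: the userland view of this header (the 64-byte
 * API 1 record), paraphrased from Documentation/usb/usbmon.rst. The
 * elided definition above is authoritative; this copy is a sketch and is
 * not used by the code.
 */
struct mon_bin_hdr_sketch {
	u64 id;			/* URB ID, constant from submission to callback */
	unsigned char type;	/* 'S' (submit), 'C' (callback) or 'E' (error) */
	unsigned char xfer_type; /* ISO (0), Intr, Control, Bulk (3) */
	unsigned char epnum;	/* endpoint number and transfer direction */
	unsigned char devnum;	/* device address */
	u16 busnum;		/* bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* timestamp, seconds */
	s32 ts_usec;		/* timestamp, microseconds */
	int status;
	unsigned int length;	/* length of data (submitted or actual) */
	unsigned int len_cap;	/* delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* only for Control S-type */
		struct {			/* only for ISO */
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;		/* only for Interrupt and ISO */
	int start_frame;	/* for ISO */
	unsigned int xfer_flags; /* copy of URB's transfer_flags */
	unsigned int ndesc;	/* actual number of ISO descriptors */
};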

/*
 * ISO vector, packed into the head of the data stream.
 * This has to take 16 bytes to make sure that the end-of-buffer
 * wrap does not happen in the middle of a descriptor.
 */
struct mon_bin_isodesc {};
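
/*
 * Sketch of one packed descriptor as documented for userland in
 * Documentation/usb/usbmon.rst; an illustrative paraphrase only, the
 * elided definition above is authoritative.
 */
struct mon_bin_isodesc_sketch {
	int		iso_status;
	unsigned int	iso_off;	/* offset of this packet's data */
	unsigned int	iso_len;
	u32		_pad;		/* pads the record to 16 bytes */
};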

/* per-file statistics (returned by MON_IOCG_STATS) */
struct mon_bin_stats {};

struct mon_bin_get {};

struct mon_bin_mfetch {};
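
/*
 * Userland-facing argument structures of the ioctls above, paraphrased
 * from Documentation/usb/usbmon.rst as a sketch (the elided definitions
 * above are authoritative): MON_IOCG_STATS fills a queued/dropped pair,
 * MON_IOCX_GET/GETX take header and data destinations, and MON_IOCX_MFETCH
 * exchanges offsets into the mmap-ed ring.
 */
struct mon_bin_stats_sketch {
	u32 queued;		/* events currently held in the ring */
	u32 dropped;		/* events lost since the last query */
};

struct mon_bin_get_sketch {
	struct mon_bin_hdr __user *hdr;	/* 48 or 64 bytes, see PKT_SZ_API* */
	void __user *data;
	size_t alloc;			/* length of the data buffer */
};

struct mon_bin_mfetch_sketch {
	u32 __user *offvec;	/* vector of fetched event offsets */
	u32 nfetch;		/* in: events to fetch; out: events fetched */
	u32 nflush;		/* events to flush before fetching */
};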

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {};

struct mon_bin_mfetch32 {};
#endif

/* Having these two values the same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN	64
#define PKT_SIZE	64
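
/*
 * Spelled out as checks (illustrative, not part of the original): every
 * allocation is rounded to a PKT_ALIGN boundary and a header occupies
 * exactly one such slot, so a mon_bin_hdr can never straddle the end of
 * a chunk or of the ring.
 */
static_assert(PKT_SIZE == PKT_ALIGN);
static_assert(CHUNK_SIZE % PKT_ALIGN == 0);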

#define PKT_SZ_API0	48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1	64	/* API 1 size: extra fields */

#define ISODESC_MAX	128

/* max number of USB buses supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] =;

static const struct class mon_bin_class =;

static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 */
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{}
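
/*
 * A minimal sketch of such a chunked copy, written against an explicit
 * chunk vector and ring size instead of the (elided) mon_reader_bin
 * fields; a kernel-mapped "ptr" per chunk is an assumption here. The
 * chunked copy_to_user below differs only in the copy primitive and in
 * returning an error code rather than an offset.
 */
static unsigned int mon_copy_to_buff_sketch(const struct mon_pgmap *vec,
    unsigned int ring_size, unsigned int off,
    const unsigned char *from, unsigned int length)
{
	while (length > 0) {
		/* never copy past the end of the current chunk */
		unsigned int in_page = CHUNK_SIZE - (off & (CHUNK_SIZE - 1));
		unsigned int step = min(in_page, length);

		memcpy(vec[off / CHUNK_SIZE].ptr + (off & (CHUNK_SIZE - 1)),
		       from, step);

		off += step;
		if (off >= ring_size)		/* wrap around the ring */
			off = 0;
		from += step;
		length -= step;
	}
	return off;			/* offset just past the copied data */
}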

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{}
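
/*
 * Sketch of the allocation step, assuming ring bookkeeping fields named
 * b_in (producer offset), b_cnt (bytes in use) and b_size (total ring
 * size) in the elided mon_reader_bin; those names are assumptions here.
 */
static unsigned int mon_buff_area_alloc_sketch(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN - 1) & ~(PKT_ALIGN - 1);	/* keep headers aligned */
	if (rp->b_cnt + size > rp->b_size)
		return ~0;					/* ring full: caller drops */
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)			/* producer may wrap */
		rp->b_in -= rp->b_size;
	return offset;
}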

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{}
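
/*
 * Sketch of the non-wrapping variant, using the same assumed b_in/b_cnt/
 * b_size fields: when a request would straddle the end of the ring, the
 * tail is consumed by a filler packet and the event is placed at offset 0.
 */
static unsigned int mon_buff_area_alloc_contig_sketch(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset, fill_size;

	size = (size + PKT_ALIGN - 1) & ~(PKT_ALIGN - 1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/* would wrap: mark the tail as filler, restart at the head */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);
		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else {
		offset = rp->b_in;
		rp->b_in = (rp->b_in + size == rp->b_size) ? 0 : rp->b_in + size;
		rp->b_cnt += size;
	}
	return offset;
}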

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a data fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{}

static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length,
    char *flag)
{}

/*
 * This is the look-ahead pass in case of 'C Zi', when actual_length cannot
 * be used to determine the length of the whole contiguous buffer.
 */
static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
    struct urb *urb, unsigned int ndesc)
{}

static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{}

static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{}

static void mon_bin_submit(void *data, struct urb *urb)
{}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{}

static void mon_bin_error(void *data, struct urb *urb, int error)
{}

static int mon_bin_open(struct inode *inode, struct file *file)
{}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{}
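
/*
 * Sketch of the consumer path (fetch_lock, b_lock, b_out, b_size and the
 * len_cap header field are assumed names, since the structures are
 * elided): wait for an event, copy the header and as much data as the
 * caller allowed, then release the consumed ring space.
 */
static int mon_bin_get_event_sketch(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	struct mon_bin_hdr *ep;
	unsigned int offset;
	unsigned long flags;
	int err;

	mutex_lock(&rp->fetch_lock);		/* one consumer at a time */

	err = mon_bin_wait_event(file, rp);	/* -EWOULDBLOCK or -EINTR possible */
	if (err < 0) {
		mutex_unlock(&rp->fetch_lock);
		return err;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);	/* oldest header in the ring */
	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	offset = rp->b_out + PKT_SIZE;		/* data follows its header */
	if (offset >= rp->b_size)
		offset -= rp->b_size;
	if (copy_from_buf(rp, offset, data, min(ep->len_cap, nbytes))) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);	/* consume the event */
	spin_unlock_irqrestore(&rp->b_lock, flags);

	mutex_unlock(&rp->fetch_lock);
	return 0;
}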

static int mon_bin_release(struct inode *inode, struct file *file)
{}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{}

/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into the user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{}

/*
 * ioctl dispatcher for the binary API defined above.
 */
static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{}

#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{}
#endif /* CONFIG_COMPAT */
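
/*
 * Userland usage sketch for the ioctls above (not kernel code; the header
 * layout is the documented 64-byte API 1 record, "consume" is a made-up
 * placeholder and error handling is omitted):
 *
 *	int fd = open("/dev/usbmon0", O_RDONLY);
 *	struct usbmon_packet hdr;		// 64-byte header, per usbmon.rst
 *	unsigned char data[4096];
 *	struct mon_bin_get get = {
 *		.hdr = &hdr, .data = data, .alloc = sizeof(data),
 *	};
 *
 *	ioctl(fd, MON_IOCT_RING_SIZE, 300 * 1024);	// resize the kernel ring
 *	for (;;) {
 *		if (ioctl(fd, MON_IOCX_GETX, &get) < 0)
 *			break;
 *		consume(&hdr, data, hdr.len_cap);	// one event per call
 *	}
 */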

static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{}

/*
 * Map ring pages to user space.
 */
static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
{}
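
/*
 * Sketch of the fault path (b_vec, b_size and the per-chunk "pg" page
 * pointer are assumed names): the faulting page offset is mapped straight
 * onto the corresponding ring chunk, whose page is returned with an extra
 * reference for the mm core.
 */
static vm_fault_t mon_bin_vma_fault_sketch(struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *pageptr;

	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;		/* past the end of the ring */
	pageptr = rp->b_vec[offset / CHUNK_SIZE].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}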

static const struct vm_operations_struct mon_bin_vm_ops =;

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{}

static const struct file_operations mon_fops_binary =;

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{}
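
/*
 * Sketch of the allocation loop promised by the CHUNK_SIZE comment above
 * (per-chunk "ptr" and "pg" fields are assumed names): one zeroed page
 * per chunk, with full unwinding if any allocation fails.
 */
static int mon_alloc_buff_sketch(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)	/* undo what was already taken */
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page((void *) vaddr);
	}
	return 0;
}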

static void mon_free_buff(struct mon_pgmap *map, int npages)
{}

int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{}

void mon_bin_del(struct mon_bus *mbus)
{}

int __init mon_bin_init(void)
{}

void mon_bin_exit(void)
{}