/* linux/kernel/events/ring_buffer.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <[email protected]>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <[email protected]>
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"

/* Wake up consumers waiting on this handle's ring buffer.
 * NOTE(review): body elided in this copy of the file. */
static void perf_output_wakeup(struct perf_output_handle *handle)
{}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
/* Take a reference on the output handle; pairs with perf_output_put_handle()
 * (see the nesting/NMI comment above). NOTE(review): body elided in this copy. */
static void perf_output_get_handle(struct perf_output_handle *handle)
{}

/* Drop the reference taken by perf_output_get_handle(); per the comment above,
 * the outermost release publishes the head and generates a wakeup.
 * NOTE(review): body elided in this copy. */
static void perf_output_put_handle(struct perf_output_handle *handle)
{}

/*
 * Check whether @size bytes fit between @head and @tail in a buffer of
 * @data_size bytes; @backward selects the write direction.
 * NOTE(review): body elided in this copy.
 */
static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{}

/*
 * Common implementation behind perf_output_begin_{forward,backward}():
 * reserve @size bytes in @event's ring buffer and initialize @handle.
 * NOTE(review): body elided in this copy.
 */
static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_sample_data *data,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{}

/* Forward-direction wrapper around __perf_output_begin().
 * NOTE(review): body elided in this copy — non-void function with no return. */
int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_sample_data *data,
			      struct perf_event *event, unsigned int size)
{}

/* Backward-direction wrapper around __perf_output_begin().
 * NOTE(review): body elided in this copy — non-void function with no return. */
int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_sample_data *data,
			       struct perf_event *event, unsigned int size)
{}

/* Direction-agnostic entry point; presumably dispatches to the forward or
 * backward variant above — verify against the original source.
 * NOTE(review): body elided in this copy. */
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_sample_data *data,
		      struct perf_event *event, unsigned int size)
{}

/* Copy @len bytes from @buf into the space reserved via perf_output_begin*().
 * NOTE(review): body elided in this copy. */
unsigned int perf_output_copy(struct perf_output_handle *handle,
		      const void *buf, unsigned int len)
{}

/* Advance the output handle by @len bytes without copying any data.
 * NOTE(review): body elided in this copy. */
unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{}

/* Finish a transaction started by perf_output_begin*().
 * NOTE(review): body elided in this copy. */
void perf_output_end(struct perf_output_handle *handle)
{}

/* Initialize a freshly allocated perf_buffer with the given wakeup
 * @watermark and @flags; called from rb_alloc().
 * NOTE(review): body elided in this copy. */
static void
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{}

/* Record @flags on the in-flight AUX transaction described by @handle.
 * NOTE(review): body elided in this copy of the file. */
void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{}
/* Fix: EXPORT_SYMBOL_GPL() had lost its symbol argument in this copy. */
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
/* See the block comment above for the contract (call from pmu::start()).
 * NOTE(review): body elided in this copy — non-void function with no return. */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{}
/* Fix: EXPORT_SYMBOL_GPL() had lost its symbol argument in this copy. */
EXPORT_SYMBOL_GPL(perf_aux_output_begin);

/* Decide whether the AUX buffer has progressed far enough to require a
 * consumer wakeup. NOTE(review): body elided in this copy. */
static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
{}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
/* See the block comment above for the contract (call from pmu::stop()).
 * NOTE(review): body elided in this copy of the file. */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{}
/* Fix: EXPORT_SYMBOL_GPL() had lost its symbol argument in this copy. */
EXPORT_SYMBOL_GPL(perf_aux_output_end);

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
/* Skip @size bytes in the AUX buffer (see comment above re: alignment).
 * NOTE(review): body elided in this copy — non-void function with no return. */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{}
/* Fix: EXPORT_SYMBOL_GPL() had lost its symbol argument in this copy. */
EXPORT_SYMBOL_GPL(perf_aux_output_skip);

/* Return the AUX area address associated with @handle.
 * NOTE(review): body elided in this copy — non-void function with no return. */
void *perf_get_aux(struct perf_output_handle *handle)
{}
/* Fix: EXPORT_SYMBOL_GPL() had lost its symbol argument in this copy. */
EXPORT_SYMBOL_GPL(perf_get_aux);

/*
 * Copy out AUX data from an AUX handle.
 */
/* Copy AUX data in the range [@from, @to) out of @aux_handle into @handle
 * (see comment above). NOTE(review): body elided in this copy. */
long perf_output_copy_aux(struct perf_output_handle *aux_handle,
			  struct perf_output_handle *handle,
			  unsigned long from, unsigned long to)
{}

#define PERF_AUX_GFP

/* Allocate one AUX backing page of the given @order on NUMA @node.
 * NOTE(review): body elided in this copy; PERF_AUX_GFP above also lost
 * its value in this extraction. */
static struct page *rb_alloc_aux_page(int node, int order)
{}

/* Free the AUX page at index @idx of @rb.
 * NOTE(review): body elided in this copy. */
static void rb_free_aux_page(struct perf_buffer *rb, int idx)
{}

/* Release all AUX resources of @rb; internal helper for rb_free_aux().
 * NOTE(review): body elided in this copy. */
static void __rb_free_aux(struct perf_buffer *rb)
{}

/* Allocate @nr_pages of AUX space for @rb starting at mmap offset @pgoff.
 * NOTE(review): body elided in this copy — non-void function with no return. */
int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{}

/* Public entry point for tearing down @rb's AUX area.
 * NOTE(review): body elided in this copy. */
void rb_free_aux(struct perf_buffer *rb)
{}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

/*
 * Translate an mmap page offset to its backing page: offset 0 is the
 * user control page, offsets 1..nr_pages map to data_pages[0..nr_pages-1],
 * anything beyond that is out of range.
 */
static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	if (pgoff <= rb->nr_pages)
		return virt_to_page(rb->data_pages[pgoff - 1]);

	return NULL;
}

/*
 * Allocate one zeroed page, preferring the NUMA node of @cpu
 * (cpu == -1 means no node preference). Returns the page's kernel
 * virtual address, or NULL on allocation failure.
 */
static void *perf_mmap_alloc_page(int cpu)
{
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	struct page *pg;

	pg = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);

	return pg ? page_address(pg) : NULL;
}

/*
 * Free a page previously obtained from perf_mmap_alloc_page(),
 * clearing its ->mapping back-pointer first.
 */
static void perf_mmap_free_page(void *addr)
{
	struct page *pg = virt_to_page(addr);

	pg->mapping = NULL;
	__free_page(pg);
}

/*
 * Allocate a perf ring buffer backed by @nr_pages individual data pages
 * plus one user control page, preferring @cpu's NUMA node.
 * Returns the initialized buffer, or NULL on any allocation failure
 * (all partially allocated resources are unwound via the goto chain).
 */
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	int i, node;

	/* Struct plus a trailing pointer array of nr_pages entries. */
	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	/* Refuse sizes the page allocator cannot satisfy in one go. */
	if (order_base_2(size) > PAGE_SHIFT+MAX_PAGE_ORDER)
		goto fail;

	/* cpu == -1 yields node == -1, i.e. no NUMA preference. */
	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	rb = kzalloc_node(size, GFP_KERNEL, node);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

	/* Unwind in reverse order of acquisition. */
fail_data_pages:
	/* i is the first index that failed; free indices [0, i). */
	for (i--; i >= 0; i--)
		perf_mmap_free_page(rb->data_pages[i]);

	perf_mmap_free_page(rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

/*
 * Tear down a buffer built by rb_alloc(): release the control page,
 * then every data page, then the perf_buffer structure itself.
 */
void rb_free(struct perf_buffer *rb)
{
	int idx;

	perf_mmap_free_page(rb->user_page);
	for (idx = 0; idx < rb->nr_pages; idx++)
		perf_mmap_free_page(rb->data_pages[idx]);

	kfree(rb);
}

#else
/* vmalloc-backed variant of the page-offset lookup.
 * NOTE(review): body elided in this copy — non-void function with no return. */
static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{}

/* Clear per-page state before the vmalloc area is torn down.
 * NOTE(review): body elided in this copy. */
static void perf_mmap_unmark_page(void *addr)
{}

/* Deferred-free worker for the vmalloc-backed buffer (runs from a
 * workqueue). NOTE(review): body elided in this copy. */
static void rb_free_work(struct work_struct *work)
{}

/* vmalloc-path counterpart of rb_free(); presumably schedules
 * rb_free_work() — verify against the original source.
 * NOTE(review): body elided in this copy. */
void rb_free(struct perf_buffer *rb)
{}

/* vmalloc-path counterpart of rb_alloc().
 * NOTE(review): body elided in this copy — non-void function with no return. */
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{}

#endif

/* Public wrapper over __perf_mmap_to_page() shared by both backends.
 * NOTE(review): body elided in this copy — non-void function with no return. */
struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{}