// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt)

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/hash.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>
#include <linux/pid_namespace.h>
#include <linux/fdtable.h>
#include <linux/file.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {};

typedef void (*glock_examiner)(struct gfs2_glock *gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static void request_demote(struct gfs2_glock *gl, unsigned int state,
			   unsigned long delay, bool remote);

static struct dentry *gfs2_root;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       BIT(GFS2_GL_HASH_SHIFT)

static const struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = offsetofend(struct lm_lockname, ln_type),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

#define GLOCK_WAIT_TABLE_BITS 12
#define GLOCK_WAIT_TABLE_SIZE (1 << GLOCK_WAIT_TABLE_BITS)
static wait_queue_head_t glock_wait_table[GLOCK_WAIT_TABLE_SIZE] __cacheline_aligned;

struct wait_glock_queue {};

static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			       int sync, void *key)
{}

static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{}

/**
 * wake_up_glock  -  Wake up waiters on a glock
 * @gl: the glock
 */
static void wake_up_glock(struct gfs2_glock *gl)
{}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{}

/**
 * glock_blocked_by_withdraw - determine if we can still use a glock
 * @gl: the glock
 *
 * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
 * when we're withdrawn. For example, to maintain metadata integrity, we should
 * disallow the use of inode and rgrp glocks when withdrawn. Other glocks like
 * the iopen or freeze glock may be safely used because none of their
 * metadata goes through the journal. So in general, we should disallow all
 * glocks that are journaled, and allow all the others. One exception is:
 * we need to allow our active journal to be promoted and demoted so others
 * may recover it and we can reacquire it when they're done.
 */
static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{}
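
/*
 * Illustrative sketch of the policy described above; not the in-tree
 * implementation.  To stay self-contained it takes the withdraw state and
 * the "is this a journaled glock" answer as caller-supplied booleans, and it
 * assumes sd_jdesc / jd_no_addr identify our own journal's glock.
 */
static bool __maybe_unused withdraw_policy_sketch(struct gfs2_glock *gl,
						  bool withdrawn, bool journaled)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!withdrawn)		/* not withdrawn: nothing is blocked */
		return false;
	if (!journaled)		/* e.g. iopen or freeze glocks: always allowed */
		return false;
	/* Exception: our own journal must stay usable so others can recover it. */
	if (sdp->sd_jdesc && gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
		return false;
	return true;
}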

static void __gfs2_glock_free(struct gfs2_glock *gl)
{}

void gfs2_glock_free(struct gfs2_glock *gl) {}

void gfs2_glock_free_later(struct gfs2_glock *gl) {}

static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
{}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
{}

static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{}

/*
 * Enqueue the glock on the work queue.  Passes one glock reference on to the
 * work queue.
 */
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {}
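
/*
 * Sketch of the reference hand-off described above; not the in-tree helper.
 * Assumptions: @wq is whichever workqueue the glock work runs on, gl_work is
 * the glock's delayed_work, and the caller holds gl_lockref.lock.  If the
 * work was already queued, the reference the caller passed in is handed back
 * so the count stays balanced.
 */
static void __maybe_unused queue_work_sketch(struct gfs2_glock *gl,
					     struct workqueue_struct *wq,
					     unsigned long delay)
{
	if (!queue_delayed_work(wq, &gl->gl_work, delay))
		gl->gl_lockref.count--;	/* already queued: give the reference back */
}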

static void __gfs2_glock_put(struct gfs2_glock *gl)
{}

static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
{}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{}

/*
 * gfs2_glock_put_async - Decrement reference count without sleeping
 * @gl: The glock to put
 *
 * Decrement the reference count on glock immediately unless it is the last
 * reference.  Defer putting the last reference to work queue context.
 */
void gfs2_glock_put_async(struct gfs2_glock *gl)
{}
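
/*
 * Sketch of the scheme described above; not the in-tree implementation.
 * lockref_put_or_lock() drops a non-final reference without sleeping; when
 * it instead returns with gl_lockref.lock held (final reference), that
 * reference is handed over to the glock work queue so the actual put happens
 * in a context that may sleep.
 */
static void __maybe_unused put_async_sketch(struct gfs2_glock *gl)
{
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;		/* non-final reference dropped */

	/*
	 * Final reference, gl_lockref.lock now held: pass the reference on to
	 * the work queue (see the comment above gfs2_glock_queue_work()).
	 */
	gfs2_glock_queue_work(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
}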

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @current_gh: One of the current holders of @gl
 * @gh: The lock request which we wish to grant
 *
 * With our current compatibility rules, if a glock has one or more active
 * holders (HIF_HOLDER flag set), any of those holders can be passed in as
 * @current_gh; they are all the same as far as compatibility with the new @gh
 * goes.
 *
 * Returns true if it's ok to grant the lock.
 */

static inline bool may_grant(struct gfs2_glock *gl,
			     struct gfs2_holder *current_gh,
			     struct gfs2_holder *gh)
{}
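
/*
 * Minimal sketch of the rule described above; not the in-tree may_grant().
 * Because all active holders are mutually compatible, comparing the new
 * request against any single one of them is enough.  Flag-based exceptions
 * (LM_FLAG_ANY, GL_EXACT, LM_FLAG_NODE_SCOPE, ...) are deliberately ignored.
 */
static inline bool __maybe_unused may_grant_sketch(const struct gfs2_glock *gl,
						   const struct gfs2_holder *current_gh,
						   const struct gfs2_holder *gh)
{
	if (!current_gh)
		/* No active holders: is the cached lock state strong enough? */
		return gl->gl_state == gh->gh_state ||
		       gl->gl_state == LM_ST_EXCLUSIVE;
	if (current_gh->gh_state == LM_ST_EXCLUSIVE)
		return false;	/* exclusive holders share with nobody */
	return gh->gh_state == current_gh->gh_state;
}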

static void gfs2_holder_wake(struct gfs2_holder *gh)
{}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM
 */

static void do_error(struct gfs2_glock *gl, const int ret)
{}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{}

/*
 * gfs2_instantiate - Call the glops instantiate function
 * @gh: The glock holder
 *
 * Returns: 0 if instantiate was successful, or error.
 */
int gfs2_instantiate(struct gfs2_holder *gh)
{}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 * 
 * Returns true on success (i.e., progress was made or there are no waiters).
 */

static bool do_promote(struct gfs2_glock *gl)
{}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{}

/**
 * find_last_waiter - find the last gh that's waiting for the glock
 * @gl: the glock
 *
 * This also is a fast way of finding out if there are any waiters.
 */

static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
{}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{}

static void gfs2_set_demote(struct gfs2_glock *gl)
{}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{}

static bool is_system_glock(struct gfs2_glock *gl)
{}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
		     unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{}

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
void glock_set_object(struct gfs2_glock *gl, void *object)
{}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: object the glock currently points at
 */
void glock_clear_object(struct gfs2_glock *gl, void *object)
{}

void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
{}

bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
{}

static void gfs2_glock_poke(struct gfs2_glock *gl)
{}

static bool gfs2_try_evict(struct gfs2_glock *gl)
{}

bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
{}

static bool gfs2_queue_verify_evict(struct gfs2_glock *gl)
{}

static void delete_work_func(struct work_struct *work)
{}

static void glock_work_func(struct work_struct *work)
{}

static struct gfs2_glock *find_insert_glock(struct lm_lockname *name,
					    struct gfs2_glock *new)
{}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{}
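
/*
 * Hedged usage sketch (not code from this file): a caller that only has a
 * lock number pairs gfs2_glock_get() with a holder and then drops the
 * reference gfs2_glock_get() returned, since the holder keeps its own.
 * This is essentially the pattern gfs2_glock_nq_num() below provides.
 */
static int __maybe_unused lock_by_number_sketch(struct gfs2_sbd *sdp, u64 number,
						const struct gfs2_glock_operations *glops,
						unsigned int state, u16 flags,
						struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (error)
		return error;

	error = gfs2_glock_nq_init(gl, state, flags, gh);
	gfs2_glock_put(gl);	/* the holder holds its own reference now */
	return error;
}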

/**
 * __gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 * @ip: the caller's return address (saved in gh_ip for debugging)
 *
 */

void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
			struct gfs2_holder *gh, unsigned long ip)
{}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{}

static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
					unsigned long start_time)
{}

/**
 * gfs2_glock_holder_ready - holder is ready and its error code can be collected
 * @gh: the glock holder
 *
 * Called when a glock holder no longer needs to be waited for because it is
 * now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
 * failed (gh_error != 0).
 */

int gfs2_glock_holder_ready(struct gfs2_holder *gh)
{}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{}

static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
{}

/**
 * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
 * @num_gh: the number of holders in the array
 * @ghs: the glock holder array
 *
 * Returns: 0 on success, meaning all glocks have been granted and are held.
 *          -ESTALE if the request timed out, meaning all glocks were released,
 *          and the caller should retry the operation.
 */

int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
{}
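
/*
 * Hedged usage sketch: queue two holders with GL_ASYNC and collect both
 * results in one place.  Per the comment above, -ESTALE means everything was
 * dropped again and the whole operation should be retried; how the retry and
 * the holder cleanup are structured is left to the caller.
 */
static int __maybe_unused async_pair_sketch(struct gfs2_glock *gl_a,
					    struct gfs2_glock *gl_b,
					    struct gfs2_holder *ghs)
{
	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[0]);
	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, GL_ASYNC, &ghs[1]);

	/* With GL_ASYNC set, gfs2_glock_nq() queues the request and returns. */
	gfs2_glock_nq(&ghs[0]);
	gfs2_glock_nq(&ghs[1]);

	return gfs2_glock_async_wait(2, ghs);
}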

/**
 * request_demote - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise pending demote
 * @remote: true if this came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void request_demote(struct gfs2_glock *gl, unsigned int state,
			   unsigned long delay, bool remote)
{}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{}

static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
{}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 * 
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{}
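
/*
 * Hedged usage sketch of the usual enqueue/dequeue pairing around a critical
 * section.  With LM_FLAG_TRY in the holder flags, GLR_TRYFAILED would be a
 * possible return value from gfs2_glock_nq() here as well.
 */
static int __maybe_unused hold_glock_sketch(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;
	int error;

	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error) {
		gfs2_holder_uninit(&gh);
		return error;
	}

	/* ... the glock is held in LM_ST_SHARED here ... */

	gfs2_glock_dq_uninit(&gh);	/* dequeue and drop the holder's reference */
	return 0;
}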

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{}

static inline bool needs_demote(struct gfs2_glock *gl)
{}

static void __gfs2_glock_dq(struct gfs2_holder *gh)
{}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */
void gfs2_glock_dq(struct gfs2_holder *gh)
{}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, u16 flags, struct gfs2_holder *gh)
{}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: placeholder for the holder structure to pass back
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{}
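
/*
 * Sketch of the "sort, then acquire in order" idea described above; not the
 * in-tree nq_m_sync().  Sorting an array of holder pointers with
 * glock_compare() gives every caller the same acquisition order, which is
 * what makes multi-glock acquisition deadlock free; on failure, everything
 * acquired so far is dropped again.
 */
static int __maybe_unused nq_in_order_sketch(unsigned int num_gh,
					     struct gfs2_holder **p)
{
	unsigned int x;
	int error;

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			return error;
		}
	}
	return 0;
}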

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{}

void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{}

/**
 * gfs2_should_freeze - Figure out if glock should be frozen
 * @gl: The glock in question
 *
 * Glocks are not frozen if (a) the result of the dlm operation is
 * an error, (b) the locking operation was an unlock operation or
 * (c) if there is a "noexp" flagged request anywhere in the queue
 *
 * Returns: 1 if freezing should occur, 0 otherwise
 */

static int gfs2_should_freeze(const struct gfs2_glock *gl)
{}
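
/*
 * Sketch of the three conditions listed above; not the in-tree
 * implementation.  It assumes gl_reply carries the dlm status, gl_target the
 * requested state, and that holders still waiting (no HIF_HOLDER bit) sit on
 * gl_holders alongside the granted ones.
 */
static int __maybe_unused should_freeze_sketch(const struct gfs2_glock *gl)
{
	const struct gfs2_holder *gh;

	if (gl->gl_reply & ~LM_OUT_ST_MASK)	/* (a) the dlm reported an error */
		return 0;
	if (gl->gl_target == LM_ST_UNLOCKED)	/* (b) this was an unlock */
		return 0;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (gh->gh_flags & LM_FLAG_NOEXP)	/* (c) a "noexp" request is queued */
			return 0;
	}
	return 1;
}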

/**
 * gfs2_glock_complete - Callback used by locking
 * @gl: Pointer to the glock
 * @ret: The return value from the dlm
 *
 * The gl_reply field is under the gl_lockref.lock lock so that it is ok
 * to use a bitfield shared with other glock state fields.
 */

void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{}

static int glock_cmp(void *priv, const struct list_head *a,
		     const struct list_head *b)
{}

static bool can_free_glock(struct gfs2_glock *gl)
{}

/**
 * gfs2_dispose_glock_lru - Demote a list of glocks
 * @list: The list to dispose of
 *
 * Disposing of glocks may involve disk accesses, so that here we sort
 * the glocks by number (i.e. disk location of the inodes) so that if
 * there are any such accesses, they'll be sent in order (mostly).
 *
 * Must be called under the lru_lock, but may drop and retake this
 * lock. While the lru_lock is dropped, entries may vanish from the
 * list, but no new entries will appear on the list (since it is
 * private)
 */

static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
__releases(&lru_lock)
__acquires(&lru_lock)
{}
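
/*
 * The sort step the comment above refers to presumably amounts to handing
 * glock_cmp() (whose prototype matches list_sort()'s cmp argument) to
 * list_sort(), e.g.:
 *
 *	list_sort(NULL, list, glock_cmp);
 *
 * after which the entries can be walked and demoted in (roughly) disk order.
 */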

/**
 * gfs2_scan_glock_lru - Scan the LRU looking for locks to demote
 * @nr: The number of entries to scan
 *
 * This function selects the entries on the LRU which are able to
 * be demoted, and then kicks off the process by calling
 * gfs2_dispose_glock_lru() above.
 */

static unsigned long gfs2_scan_glock_lru(unsigned long nr)
{}

static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{}

static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{}

static struct shrinker *glock_shrinker;

/**
 * glock_hash_walk - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 *
 * Note that the function can be called multiple times on the same
 * object.  So the user must ensure that the function can cope with
 * that.
 */

static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
{}
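
/*
 * Usage sketch: the walkers later in this file are presumably invoked along
 * the lines of
 *
 *	glock_hash_walk(thaw_glock, sdp);	(see gfs2_glock_thaw)
 *	glock_hash_walk(clear_glock, sdp);	(see gfs2_gl_hash_clear)
 *
 * and since the examiner may see the same glock more than once, examiners
 * like thaw_glock() and clear_glock() have to be idempotent.
 */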

void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{}

static void flush_delete_work(struct gfs2_glock *gl)
{}

void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
{}

/**
 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
 * @gl: The glock to thaw
 *
 */

static void thaw_glock(struct gfs2_glock *gl)
{}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{}

/**
 * gfs2_glock_thaw - Thaw any frozen glocks
 * @sdp: The super block
 *
 */

void gfs2_glock_thaw(struct gfs2_sbd *sdp)
{}

static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{}

static void dump_glock_func(struct gfs2_glock *gl)
{}

static void withdraw_dq(struct gfs2_glock *gl)
{}

void gfs2_gl_dq_holders(struct gfs2_sbd *sdp)
{}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 *
 * Called when unmounting the filesystem.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{}

static const char *state2str(unsigned state)
{}

static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
{}

/**
 * dump_holder - print information about a glock holder
 * @seq: the seq_file struct
 * @gh: the glock holder
 * @fs_id_buf: pointer to file system id (if requested)
 *
 */

static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
			const char *fs_id_buf)
{}

static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
{}

/**
 * gfs2_dump_glock - print information about a glock
 * @seq: The seq_file struct
 * @gl: the glock
 * @fsid: If true, also dump the file system id
 *
 * The file format is as follows:
 * One line per object, capital letters are used to indicate objects
 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
 * other objects are indented by a single space and follow the glock to
 * which they are related. Fields are indicated by lower case letters
 * followed by a colon and the field value, except for strings which are in
 * [] so that it's possible to see if they are composed of spaces, for
 * example. The fields are: n = number (id of the object), f = flags,
 * t = type, s = state, r = refcount, e = error, p = pid.
 *
 */

void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
{}

static int gfs2_glstats_seq_show(struct seq_file *seq, void *iter_ptr)
{}

static const char *gfs2_gltype[] = {};

static const char *gfs2_stype[] = {};

#define GFS2_NR_SBSTATS

static int gfs2_sbstats_seq_show(struct seq_file *seq, void *iter_ptr)
{}

int __init gfs2_glock_init(void)
{}

void gfs2_glock_exit(void)
{}

static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
{}

static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{}

static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
				 loff_t *pos)
{}

static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
	__releases(RCU)
{}

static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{}

static void *gfs2_sbstats_seq_start(struct seq_file *seq, loff_t *pos)
{}

static void *gfs2_sbstats_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{}

static void gfs2_sbstats_seq_stop(struct seq_file *seq, void *iter_ptr)
{}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static const struct seq_operations gfs2_glstats_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glstats_seq_show,
};

static const struct seq_operations gfs2_sbstats_sops = {
	.start = gfs2_sbstats_seq_start,
	.next  = gfs2_sbstats_seq_next,
	.stop  = gfs2_sbstats_seq_stop,
	.show  = gfs2_sbstats_seq_show,
};

#define GFS2_SEQ_GOODSIZE

static int __gfs2_glocks_open(struct inode *inode, struct file *file,
			      const struct seq_operations *ops)
{}

static int gfs2_glocks_open(struct inode *inode, struct file *file)
{}

static int gfs2_glocks_release(struct inode *inode, struct file *file)
{}

static int gfs2_glstats_open(struct inode *inode, struct file *file)
{}

static const struct file_operations gfs2_glocks_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glocks_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

static const struct file_operations gfs2_glstats_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = gfs2_glocks_release,
};

struct gfs2_glockfd_iter {};

static struct task_struct *gfs2_glockfd_next_task(struct gfs2_glockfd_iter *i)
{}

static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i)
{}

static void *gfs2_glockfd_seq_start(struct seq_file *seq, loff_t *pos)
{}

static void *gfs2_glockfd_seq_next(struct seq_file *seq, void *iter_ptr,
				   loff_t *pos)
{}

static void gfs2_glockfd_seq_stop(struct seq_file *seq, void *iter_ptr)
{}

static void gfs2_glockfd_seq_show_flock(struct seq_file *seq,
					struct gfs2_glockfd_iter *i)
{}

static int gfs2_glockfd_seq_show(struct seq_file *seq, void *iter_ptr)
{}

static const struct seq_operations gfs2_glockfd_seq_ops = {
	.start = gfs2_glockfd_seq_start,
	.next  = gfs2_glockfd_seq_next,
	.stop  = gfs2_glockfd_seq_stop,
	.show  = gfs2_glockfd_seq_show,
};

static int gfs2_glockfd_open(struct inode *inode, struct file *file)
{}

static const struct file_operations gfs2_glockfd_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_glockfd_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};

DEFINE_SEQ_ATTRIBUTE(gfs2_sbstats);

void gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{}

void gfs2_register_debugfs(void)
{}

void gfs2_unregister_debugfs(void)
{}