// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

/* Workqueue for freeze-related work; defined here, shared across gfs2. */
struct workqueue_struct *gfs2_freeze_wq;

/* Defined elsewhere; declared here so this file can queue work on it. */
extern struct workqueue_struct *gfs2_control_wq;

/*
 * gfs2_ail_error - report an error for buffer @bh found on @gl's AIL
 * (body elided in this excerpt; description inferred from the name)
 */
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	…
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	…
}

/*
 * gfs2_ail_empty_gl - empty @gl's AIL list
 * Returns an int — presumably 0 or -errno; body not visible in this excerpt.
 */
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	…
}

/* Flush @gl's AIL; @fsync is set when invoked from the fsync path. */
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	…
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	…
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */
static int rgrp_go_sync(struct gfs2_glock *gl)
{
	…
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags: indicates which cached data to invalidate
 *
 * We never used LM_ST_DEFERRED with resource groups, so we
 * should always see the metadata flag set here.
 *
 */
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	…
}

/* Dump information about a resource group glock into @seq (for debugging). */
static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	…
}

/* Map glock @gl to the gfs2 inode it protects (name-literal; body elided). */
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	…
}

/* Map glock @gl to the resource group it protects (non-static: used elsewhere). */
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	…
}

/* Clear pending glock-operation state on inode @ip — body not visible here. */
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	…
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	…
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
static int inode_go_sync(struct gfs2_glock *gl)
{
	…
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags: indicates which cached data to invalidate
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	…
}

/*
 * gfs2_dinode_in - translate an on-disk dinode (@buf) into incore form (@ip)
 * Returns an int — presumably 0 or -errno; body not visible in this excerpt.
 */
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	…
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */
int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	…
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */
static int inode_go_instantiate(struct gfs2_glock *gl)
{
	…
}

/*
 * inode_go_held - called with holder @gh once the inode glock is held
 * (body elided in this excerpt; description inferred from the name)
 */
static int inode_go_held(struct gfs2_holder *gh)
{
	…
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */
static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	…
}

/**
 * freeze_go_callback - A cluster node is requesting a freeze
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 */
static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
	…
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	…
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	…
}

/**
 * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
 * @gl: glock being unlocked
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be unlocked so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_unlocked(struct gfs2_glock *gl)
{
	…
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	…
}

/*
 * Per-glock-type operation vtables.  Initializers are elided in this
 * excerpt; each table wires the callbacks above to one glock type.
 */
const struct gfs2_glock_operations gfs2_meta_glops = …;

const struct gfs2_glock_operations gfs2_inode_glops = …;

const struct gfs2_glock_operations gfs2_rgrp_glops = …;

const struct gfs2_glock_operations gfs2_freeze_glops = …;

const struct gfs2_glock_operations gfs2_iopen_glops = …;

const struct gfs2_glock_operations gfs2_flock_glops = …;

const struct gfs2_glock_operations gfs2_nondisk_glops = …;

const struct gfs2_glock_operations gfs2_quota_glops = …;

const struct gfs2_glock_operations gfs2_journal_glops = …;

/* Lookup table of the above ops structs, indexed by glock type number. */
const struct gfs2_glock_operations *gfs2_glops_list[] = …;