// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "ocfs2_trace.h"

void ocfs2_dentry_attach_gen(struct dentry *dentry)
{}


static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{}

static int ocfs2_match_dentry(struct dentry *dentry,
			      u64 parent_blkno,
			      int skip_unhashed)
{}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The downconvert thread is
 * looking to unhash aliases, so we allow it to skip any that already
 * have that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
				      u64 parent_blkno,
				      int skip_unhashed)
{}
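
/*
 * A hedged sketch (not necessarily the in-tree implementation) of the
 * walk described above: iterate the aliases hanging off
 * inode->i_dentry and hand back a pinned reference to the first one
 * ocfs2_match_dentry() accepts. The d_u.d_alias member name is an
 * assumption that varies across kernel versions.
 */
static struct dentry *ocfs2_find_local_alias_sketch(struct inode *inode,
						    u64 parent_blkno,
						    int skip_unhashed)
{
	struct dentry *dentry;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
			/* Pin the alias before dropping the locks. */
			dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			return dentry;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);

	return NULL;
}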

DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
int ocfs2_dentry_attach_lock(struct dentry *dentry,
			     struct inode *inode,
			     u64 parent_blkno)
{}
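
/*
 * A hedged sketch (not the in-tree code) of the attach flow documented
 * above, reduced to its core steps. The dl_count/dl_inode/
 * dl_parent_blkno fields and the ocfs2_dentry_lock_res_init() /
 * ocfs2_dentry_lock() helpers from dlmglue.h are assumptions; the real
 * function also deals with error unwinding and disconnected dentries.
 */
static int ocfs2_dentry_attach_lock_sketch(struct dentry *dentry,
					   struct inode *inode,
					   u64 parent_blkno)
{
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
	struct dentry *alias;

	if (dl)		/* An earlier lookup already attached a lock. */
		return 0;

	/* Any alias under the same parent directory can donate its lock. */
	alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
	if (alias) {
		dl = alias->d_fsdata;
		dput(alias);
	}

	if (!dl) {
		dl = kzalloc(sizeof(*dl), GFP_NOFS);
		if (!dl)
			return -ENOMEM;
		dl->dl_parent_blkno = parent_blkno;
		dl->dl_inode = igrab(inode);
		ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);
	}

	/* Publish under dentry_attach_lock so an async dput() sees it. */
	spin_lock(&dentry_attach_lock);
	dentry->d_fsdata = dl;
	dl->dl_count++;
	spin_unlock(&dentry_attach_lock);

	/* Hold the cluster lock at protected read until final dput. */
	return ocfs2_dentry_lock(dentry, 0);
}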

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This shouldn't be a
 *    problem though because one of them is in the process of being
 *    thrown out.
 */
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
				   struct ocfs2_dentry_lock *dl)
{}

void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
			   struct ocfs2_dentry_lock *dl)
{}

static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{}
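
/*
 * A hedged sketch (not the in-tree code) of the teardown the comment
 * above describes for the last alias. ocfs2_simple_drop_lockres() and
 * ocfs2_lock_res_free() are the dlmglue helpers assumed for step 1);
 * dl_inode and dl_lockres are assumed fields of struct
 * ocfs2_dentry_lock.
 */
static void ocfs2_drop_dentry_lock_sketch(struct ocfs2_super *osb,
					  struct ocfs2_dentry_lock *dl)
{
	/* Synchronizes with the downconvert thread before freeing. */
	ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
	ocfs2_lock_res_free(&dl->dl_lockres);

	/* Drop the inode reference the dentry lock is assumed to pin. */
	iput(dl->dl_inode);
	kfree(dl);
}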

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have it's dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
		       struct inode *old_dir, struct inode *new_dir)
{}
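
/*
 * A hedged sketch (not the in-tree code) of the move described above:
 * detach the lock keyed on the old parent, let the VFS move the
 * dentry, then attach a lock keyed on the new parent's block number.
 * OCFS2_SB()/OCFS2_I() and mlog_errno() come from the headers included
 * above; the ordering relative to d_move() is a simplification.
 */
static void ocfs2_dentry_move_sketch(struct dentry *dentry,
				     struct dentry *target,
				     struct inode *old_dir,
				     struct inode *new_dir)
{
	struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
	struct inode *inode = d_inode(dentry);
	int ret;

	/* The lock named after old_dir no longer covers this link. */
	ocfs2_dentry_lock_put(osb, dentry->d_fsdata);
	dentry->d_fsdata = NULL;

	d_move(dentry, target);

	/* Re-key the dentry lock on the new parent directory. */
	ret = ocfs2_dentry_attach_lock(dentry, inode,
				       OCFS2_I(new_dir)->ip_blkno);
	if (ret)
		mlog_errno(ret);
}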

const struct dentry_operations ocfs2_dentry_ops = {
	.d_revalidate		= ocfs2_dentry_revalidate,
	.d_iput			= ocfs2_dentry_iput,
};