/* linux/drivers/clk/baikal-t1/ccu-div.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <[email protected]>
 *   Dmitry Dunaev <[email protected]>
 *
 * Baikal-T1 CCU Dividers interface driver
 */

#define pr_fmt(fmt)

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/time64.h>
#include <linux/debugfs.h>

#include "ccu-div.h"

#define CCU_DIV_CTL
#define CCU_DIV_CTL_EN
#define CCU_DIV_CTL_RST
#define CCU_DIV_CTL_SET_CLKDIV
#define CCU_DIV_CTL_CLKDIV_FLD
#define CCU_DIV_CTL_CLKDIV_MASK(_width)
#define CCU_DIV_CTL_LOCK_SHIFTED
#define CCU_DIV_CTL_GATE_REF_BUF
#define CCU_DIV_CTL_LOCK_NORMAL

#define CCU_DIV_LOCK_CHECK_RETRIES

#define CCU_DIV_CLKDIV_MIN
#define CCU_DIV_CLKDIV_MAX(_mask)

/*
 * Use the next two methods until there are generic field setter and
 * getter available with non-constant mask support.
 */
/*
 * Read a register field described by a non-constant @mask out of @val —
 * presumably (val & mask) >> field-shift, per the comment above.
 * NOTE(review): body elided in this extraction — signature only.
 */
static inline u32 ccu_div_get(u32 mask, u32 val)
{}

/*
 * Prepare @val for writing into the register field described by a
 * non-constant @mask — presumably (val << field-shift) & mask.
 * NOTE(review): body elided in this extraction — signature only.
 */
static inline u32 ccu_div_prep(u32 mask, u32 val)
{}

/*
 * Delay (in nanoseconds) to wait for the divider to lock after a rate
 * change, presumably derived from @ref_clk and @div — TODO confirm
 * against the full source. Body elided in this extraction.
 */
static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
						  unsigned long div)
{}

/*
 * Output frequency for a given reference clock and divider — presumably
 * ref_clk / div. NOTE(review): body elided in this extraction.
 */
static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{}

/*
 * Program a new @divider into the variable-divider CTL register and,
 * presumably, wait for the hardware lock indication (see
 * CCU_DIV_LOCK_CHECK_RETRIES above) — TODO confirm. Returns 0 or a
 * negative errno. Body elided in this extraction.
 */
static int ccu_div_var_update_clkdiv(struct ccu_div *div,
				     unsigned long parent_rate,
				     unsigned long divider)
{}

/*
 * clk_ops .enable callback for the variable divider (used by
 * ccu_div_var_gate_to_set_ops below). Body elided in this extraction.
 */
static int ccu_div_var_enable(struct clk_hw *hw)
{}

/*
 * clk_ops .enable callback for the gateable divider — presumably sets
 * the CTL EN bit. Body elided in this extraction.
 */
static int ccu_div_gate_enable(struct clk_hw *hw)
{}

/*
 * clk_ops .disable callback for the gateable divider — presumably
 * clears the CTL EN bit. Body elided in this extraction.
 */
static void ccu_div_gate_disable(struct clk_hw *hw)
{}

/*
 * clk_ops .is_enabled callback for the gateable divider. Returns
 * non-zero when the clock is running. Body elided in this extraction.
 */
static int ccu_div_gate_is_enabled(struct clk_hw *hw)
{}

/*
 * clk_ops .enable callback for the reference-buffer clock — presumably
 * toggles CCU_DIV_CTL_GATE_REF_BUF (declared above) — TODO confirm.
 * Body elided in this extraction.
 */
static int ccu_div_buf_enable(struct clk_hw *hw)
{}

/*
 * clk_ops .disable callback for the reference-buffer clock.
 * Body elided in this extraction.
 */
static void ccu_div_buf_disable(struct clk_hw *hw)
{}

/*
 * clk_ops .is_enabled callback for the reference-buffer clock.
 * Body elided in this extraction.
 */
static int ccu_div_buf_is_enabled(struct clk_hw *hw)
{}

/*
 * clk_ops .recalc_rate callback for the variable divider — presumably
 * reads the CLKDIV field and returns parent_rate / divider.
 * Body elided in this extraction.
 */
static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{}

/*
 * Pick the divider that best approximates @rate from @parent_rate,
 * presumably clamped to the range implied by @mask (see
 * CCU_DIV_CLKDIV_MIN/MAX above) — TODO confirm. Body elided in this
 * extraction.
 */
static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
						     unsigned long parent_rate,
						     unsigned int mask)
{}

/*
 * clk_ops .round_rate callback for the variable divider.
 * Body elided in this extraction.
 */
static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *parent_rate)
{}

/*
 * This method is used for the clock divider blocks, which support the
 * on-the-fly rate change. So due to lacking the EN bit functionality
 * they can't be gated before the rate adjustment.
 *
 * NOTE(review): clk_ops .set_rate callback; body elided in this
 * extraction — signature only.
 */
static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{}

/*
 * This method is used for the clock divider blocks, which don't support
 * the on-the-fly rate change.
 *
 * NOTE(review): clk_ops .set_rate callback; body elided in this
 * extraction — signature only.
 */
static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{}

/*
 * clk_ops .recalc_rate callback for the fixed divider — presumably
 * parent_rate divided by a constant stored in the ccu_div descriptor.
 * Body elided in this extraction.
 */
static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{}

/*
 * clk_ops .round_rate callback for the fixed divider.
 * Body elided in this extraction.
 */
static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{}

/*
 * clk_ops .set_rate callback for the fixed divider — presumably a
 * no-op since the divider cannot change. Body elided in this
 * extraction.
 */
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{}

#ifdef CONFIG_DEBUG_FS

struct ccu_div_dbgfs_bit {};

#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask)

static const struct ccu_div_dbgfs_bit ccu_div_bits[] =;

#define CCU_DIV_DBGFS_BIT_NUM

/*
 * It can be dangerous to change the Divider settings behind clock framework
 * back, therefore we don't provide any kernel config based compile time option
 * for this feature to enable.
 */
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS

/*
 * Debugfs write handler for a single CTL-register flag: set the bit
 * described by the attribute's mask when @val is non-zero, clear it
 * otherwise. Always succeeds.
 */
static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *attr = priv;
	struct ccu_div *div = attr->div;
	u32 new_bits = val ? attr->mask : 0;
	unsigned long irq_flags;

	/* Serialize against concurrent CTL-register updates. */
	spin_lock_irqsave(&div->lock, irq_flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, attr->mask, new_bits);
	spin_unlock_irqrestore(&div->lock, irq_flags);

	return 0;
}

/*
 * Debugfs write handler for the variable-divider CLKDIV field. The
 * requested value is clamped into the supported divider range before
 * being programmed. Always succeeds.
 */
static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *div = priv;
	unsigned long irq_flags;
	u32 clkdiv;

	/* Keep the requested divider within the hardware limits. */
	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(div->mask));
	clkdiv = ccu_div_prep(div->mask, val);

	spin_lock_irqsave(&div->lock, irq_flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, clkdiv);
	spin_unlock_irqrestore(&div->lock, irq_flags);

	return 0;
}

#define ccu_div_dbgfs_mode

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

#define ccu_div_dbgfs_bit_set
#define ccu_div_dbgfs_var_clkdiv_set
#define ccu_div_dbgfs_mode

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

/*
 * Debugfs read handler for a single CTL-register flag (paired with
 * ccu_div_dbgfs_bit_set via the attribute macro below).
 * NOTE(review): body and macro arguments elided in this extraction.
 */
static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
{}
DEFINE_DEBUGFS_ATTRIBUTE();

/*
 * Debugfs read handler for the variable-divider CLKDIV field.
 * NOTE(review): body and macro arguments elided in this extraction.
 */
static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
{}
DEFINE_DEBUGFS_ATTRIBUTE();

/*
 * Debugfs read handler for the fixed divider value (read-only; no
 * matching setter exists). NOTE(review): body and macro arguments
 * elided in this extraction.
 */
static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
{}
DEFINE_DEBUGFS_ATTRIBUTE();

/*
 * clk_ops .debug_init callback: populate @dentry with the variable
 * divider's debugfs files. Body elided in this extraction.
 */
static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
{}

/*
 * clk_ops .debug_init callback for the gateable divider.
 * Body elided in this extraction.
 */
static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
{}

/*
 * clk_ops .debug_init callback for the reference-buffer clock.
 * Body elided in this extraction.
 */
static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
{}

/*
 * clk_ops .debug_init callback for the fixed divider.
 * Body elided in this extraction.
 */
static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{}

#else /* !CONFIG_DEBUG_FS */

#define ccu_div_var_debug_init
#define ccu_div_gate_debug_init
#define ccu_div_buf_debug_init
#define ccu_div_fixed_debug_init

#endif /* !CONFIG_DEBUG_FS */

static const struct clk_ops ccu_div_var_gate_to_set_ops =;

static const struct clk_ops ccu_div_var_nogate_ops =;

static const struct clk_ops ccu_div_gate_ops =;

static const struct clk_ops ccu_div_buf_ops =;

static const struct clk_ops ccu_div_fixed_ops =;

/*
 * Public constructor (declared in ccu-div.h): allocate a ccu_div from
 * @div_init and register its clk_hw with the common clock framework —
 * presumably returning an ERR_PTR on failure; TODO confirm error
 * convention against ccu-div.h. Body elided in this extraction.
 */
struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
{}

/*
 * Public destructor: unregister the clk_hw and release the resources
 * acquired by ccu_div_hw_register(). Body elided in this extraction.
 */
void ccu_div_hw_unregister(struct ccu_div *div)
{}