[RFC,6/8] clk: imx: pll14xx: support driver's registration by device tree

Message ID 20221019172019.2303223-7-dario.binacchi@amarulasolutions.com
State New
Series
  • clk: imx8mn: setup clocks by device tree

Commit Message

Dario Binacchi Oct. 19, 2022, 5:20 p.m. UTC
This patch extends the driver, in a backward-compatible way, so that the
PLL can also be registered from the device tree.
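
With the new "fsl,pll14xx-clock" compatible, a PLL can be described as a
child of its syscon (anatop) node, for instance as in the sketch below.
Node names, labels and the parent node shown here are only illustrative;
the "fsl,type" and "fsl,regmap-offset" properties, the single parent
clock and the optional "fsl,get-rate-nocache" boolean (which selects the
1443x DRAM PLL rate table) are the ones parsed by the driver:

  anatop: clock-controller@30360000 {
          compatible = "fsl,imx8mn-anatop", "syscon";
          reg = <0x30360000 0x10000>;

          audio_pll1: audio-pll1 {
                  compatible = "fsl,pll14xx-clock";
                  #clock-cells = <0>;
                  clocks = <&osc_24m>;
                  fsl,type = "1443x";
                  fsl,regmap-offset = <0x0>;
          };
  };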

Signed-off-by: Dario Binacchi <dario.binacchi@amarulasolutions.com>
---

 drivers/clk/imx/clk-pll14xx.c | 242 ++++++++++++++++++++++++++++------
 1 file changed, 200 insertions(+), 42 deletions(-)

Patch

diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index 1d0f79e9c346..e7404bf5f6f2 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -12,6 +12,10 @@ 
 #include <linux/export.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
 
@@ -36,7 +40,9 @@ 
 
 struct clk_pll14xx {
 	struct clk_hw			hw;
-	void __iomem			*base;
+	struct imx_clk_iomap		gnrl_ctl;
+	struct imx_clk_iomap		div_ctl0;
+	struct imx_clk_iomap		div_ctl1;
 	enum imx_pll14xx_type		type;
 	const struct imx_pll14xx_rate_table *rate_table;
 	int rate_count;
@@ -88,6 +94,30 @@  struct imx_pll14xx_clk imx_1416x_pll = {
 };
 EXPORT_SYMBOL_GPL(imx_1416x_pll);
 
+static void imx_pll14xx_writel(u32 val, const struct imx_clk_iomap *reg)
+{
+	if (reg->mem)
+		writel_relaxed(val, reg->mem + reg->offset);
+	else if (reg->regmap)
+		regmap_write(reg->regmap, reg->offset, val);
+	else
+		pr_err("%s: memory address not set\n", __func__);
+}
+
+static u32 imx_pll14xx_readl(const struct imx_clk_iomap *reg)
+{
+	u32 val = 0;
+
+	if (reg->mem)
+		val = readl_relaxed(reg->mem + reg->offset);
+	else if (reg->regmap)
+		regmap_read(reg->regmap, reg->offset, &val);
+	else
+		pr_err("%s: memory address not set\n", __func__);
+
+	return val;
+}
+
 static const struct imx_pll14xx_rate_table *imx_get_pll_settings(
 		struct clk_pll14xx *pll, unsigned long rate)
 {
@@ -159,11 +189,11 @@  static void imx_pll14xx_calc_settings(struct clk_pll14xx *pll, unsigned long rat
 		return;
 	}
 
-	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
+	pll_div_ctl0 = imx_pll14xx_readl(&pll->div_ctl0);
 	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
 	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
 	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
-	pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
+	pll_div_ctl1 = imx_pll14xx_readl(&pll->div_ctl1);
 
 	/* Then see if we can get the desired rate by only adjusting kdiv (glitch free) */
 	rate_min = pll14xx_calc_rate(pll, mdiv, pdiv, sdiv, KDIV_MIN, prate);
@@ -247,13 +277,13 @@  static unsigned long clk_pll14xx_recalc_rate(struct clk_hw *hw,
 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
 	u32 mdiv, pdiv, sdiv, kdiv, pll_div_ctl0, pll_div_ctl1;
 
-	pll_div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
+	pll_div_ctl0 = imx_pll14xx_readl(&pll->div_ctl0);
 	mdiv = FIELD_GET(MDIV_MASK, pll_div_ctl0);
 	pdiv = FIELD_GET(PDIV_MASK, pll_div_ctl0);
 	sdiv = FIELD_GET(SDIV_MASK, pll_div_ctl0);
 
 	if (pll->type == PLL_1443X) {
-		pll_div_ctl1 = readl_relaxed(pll->base + DIV_CTL1);
+		pll_div_ctl1 = imx_pll14xx_readl(&pll->div_ctl1);
 		kdiv = FIELD_GET(KDIV_MASK, pll_div_ctl1);
 	} else {
 		kdiv = 0;
@@ -275,10 +305,22 @@  static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *ra
 
 static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
 {
+	struct imx_clk_iomap *reg = &pll->gnrl_ctl;
 	u32 val;
 
-	return readl_poll_timeout(pll->base + GNRL_CTL, val, val & LOCK_STATUS, 0,
-			LOCK_TIMEOUT_US);
+	if (reg->mem)
+		return readl_poll_timeout(reg->mem + reg->offset, val,
+					  val & LOCK_STATUS, 0,
+					  LOCK_TIMEOUT_US);
+
+	if (reg->regmap)
+		return regmap_read_poll_timeout(reg->regmap, reg->offset, val,
+						val & LOCK_STATUS, 0,
+						LOCK_TIMEOUT_US);
+
+	pr_err("%s: memory address not set\n", __func__);
+
+	return -EIO;
 }
 
 static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
@@ -296,32 +338,32 @@  static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
 		return -EINVAL;
 	}
 
-	tmp = readl_relaxed(pll->base + DIV_CTL0);
+	tmp = imx_pll14xx_readl(&pll->div_ctl0);
 
 	if (!clk_pll14xx_mp_change(rate, tmp)) {
 		tmp &= ~SDIV_MASK;
 		tmp |= FIELD_PREP(SDIV_MASK, rate->sdiv);
-		writel_relaxed(tmp, pll->base + DIV_CTL0);
+		imx_pll14xx_writel(tmp, &pll->div_ctl0);
 
 		return 0;
 	}
 
 	/* Bypass clock and set lock to pll output lock */
-	tmp = readl_relaxed(pll->base + GNRL_CTL);
+	tmp = imx_pll14xx_readl(&pll->gnrl_ctl);
 	tmp |= LOCK_SEL_MASK;
-	writel_relaxed(tmp, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(tmp, &pll->gnrl_ctl);
 
 	/* Enable RST */
 	tmp &= ~RST_MASK;
-	writel_relaxed(tmp, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(tmp, &pll->gnrl_ctl);
 
 	/* Enable BYPASS */
 	tmp |= BYPASS_MASK;
-	writel(tmp, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(tmp, &pll->gnrl_ctl);
 
 	div_val = FIELD_PREP(MDIV_MASK, rate->mdiv) | FIELD_PREP(PDIV_MASK, rate->pdiv) |
 		FIELD_PREP(SDIV_MASK, rate->sdiv);
-	writel_relaxed(div_val, pll->base + DIV_CTL0);
+	imx_pll14xx_writel(div_val, &pll->div_ctl0);
 
 	/*
 	 * According to SPEC, t3 - t2 need to be greater than
@@ -333,7 +375,7 @@  static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
 
 	/* Disable RST */
 	tmp |= RST_MASK;
-	writel_relaxed(tmp, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(tmp, &pll->gnrl_ctl);
 
 	/* Wait Lock */
 	ret = clk_pll14xx_wait_lock(pll);
@@ -342,7 +384,7 @@  static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
 
 	/* Bypass */
 	tmp &= ~BYPASS_MASK;
-	writel_relaxed(tmp, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(tmp, &pll->gnrl_ctl);
 
 	return 0;
 }
@@ -357,35 +399,35 @@  static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
 
 	imx_pll14xx_calc_settings(pll, drate, prate, &rate);
 
-	div_ctl0 = readl_relaxed(pll->base + DIV_CTL0);
+	div_ctl0 = imx_pll14xx_readl(&pll->div_ctl0);
 
 	if (!clk_pll14xx_mp_change(&rate, div_ctl0)) {
 		/* only sdiv and/or kdiv changed - no need to RESET PLL */
 		div_ctl0 &= ~SDIV_MASK;
 		div_ctl0 |= FIELD_PREP(SDIV_MASK, rate.sdiv);
-		writel_relaxed(div_ctl0, pll->base + DIV_CTL0);
+		imx_pll14xx_writel(div_ctl0, &pll->div_ctl0);
 
-		writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv),
-			       pll->base + DIV_CTL1);
+		imx_pll14xx_writel(FIELD_PREP(KDIV_MASK, rate.kdiv),
+				   &pll->div_ctl1);
 
 		return 0;
 	}
 
 	/* Enable RST */
-	gnrl_ctl = readl_relaxed(pll->base + GNRL_CTL);
+	gnrl_ctl = imx_pll14xx_readl(&pll->gnrl_ctl);
 	gnrl_ctl &= ~RST_MASK;
-	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(gnrl_ctl, &pll->gnrl_ctl);
 
 	/* Enable BYPASS */
 	gnrl_ctl |= BYPASS_MASK;
-	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(gnrl_ctl, &pll->gnrl_ctl);
 
 	div_ctl0 = FIELD_PREP(MDIV_MASK, rate.mdiv) |
 		   FIELD_PREP(PDIV_MASK, rate.pdiv) |
 		   FIELD_PREP(SDIV_MASK, rate.sdiv);
-	writel_relaxed(div_ctl0, pll->base + DIV_CTL0);
+	imx_pll14xx_writel(div_ctl0, &pll->div_ctl0);
 
-	writel_relaxed(FIELD_PREP(KDIV_MASK, rate.kdiv), pll->base + DIV_CTL1);
+	imx_pll14xx_writel(FIELD_PREP(KDIV_MASK, rate.kdiv), &pll->div_ctl1);
 
 	/*
 	 * According to SPEC, t3 - t2 need to be greater than
@@ -397,7 +439,7 @@  static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
 
 	/* Disable RST */
 	gnrl_ctl |= RST_MASK;
-	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(gnrl_ctl, &pll->gnrl_ctl);
 
 	/* Wait Lock*/
 	ret = clk_pll14xx_wait_lock(pll);
@@ -406,7 +448,7 @@  static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
 
 	/* Bypass */
 	gnrl_ctl &= ~BYPASS_MASK;
-	writel_relaxed(gnrl_ctl, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(gnrl_ctl, &pll->gnrl_ctl);
 
 	return 0;
 }
@@ -421,20 +463,20 @@  static int clk_pll14xx_prepare(struct clk_hw *hw)
 	 * RESETB = 1 from 0, PLL starts its normal
 	 * operation after lock time
 	 */
-	val = readl_relaxed(pll->base + GNRL_CTL);
+	val = imx_pll14xx_readl(&pll->gnrl_ctl);
 	if (val & RST_MASK)
 		return 0;
 	val |= BYPASS_MASK;
-	writel_relaxed(val, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(val, &pll->gnrl_ctl);
 	val |= RST_MASK;
-	writel_relaxed(val, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(val, &pll->gnrl_ctl);
 
 	ret = clk_pll14xx_wait_lock(pll);
 	if (ret)
 		return ret;
 
 	val &= ~BYPASS_MASK;
-	writel_relaxed(val, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(val, &pll->gnrl_ctl);
 
 	return 0;
 }
@@ -444,7 +486,7 @@  static int clk_pll14xx_is_prepared(struct clk_hw *hw)
 	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
 	u32 val;
 
-	val = readl_relaxed(pll->base + GNRL_CTL);
+	val = imx_pll14xx_readl(&pll->gnrl_ctl);
 
 	return (val & RST_MASK) ? 1 : 0;
 }
@@ -458,9 +500,9 @@  static void clk_pll14xx_unprepare(struct clk_hw *hw)
 	 * Set RST to 0, power down mode is enabled and
 	 * every digital block is reset
 	 */
-	val = readl_relaxed(pll->base + GNRL_CTL);
+	val = imx_pll14xx_readl(&pll->gnrl_ctl);
 	val &= ~RST_MASK;
-	writel_relaxed(val, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(val, &pll->gnrl_ctl);
 }
 
 static const struct clk_ops clk_pll1416x_ops = {
@@ -485,13 +527,23 @@  static const struct clk_ops clk_pll1443x_ops = {
 	.set_rate	= clk_pll1443x_set_rate,
 };
 
-struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
-				const char *parent_name, void __iomem *base,
-				const struct imx_pll14xx_clk *pll_clk)
+static void imx_clk_hw_unregister_pll14xx(struct clk_hw *hw)
 {
+	struct clk_pll14xx *pll = to_clk_pll14xx(hw);
+
+	clk_hw_unregister(hw);
+	kfree(pll);
+}
+
+static struct clk_hw *
+imx_clk_hw_register_pll14xx(struct device *dev, const char *name,
+			    const char *parent_name,
+			    struct imx_clk_iomap *iomap,
+			    const struct imx_pll14xx_clk *pll_clk)
+{
+	struct clk_init_data init = { NULL };
 	struct clk_pll14xx *pll;
 	struct clk_hw *hw;
-	struct clk_init_data init;
 	int ret;
 	u32 val;
 
@@ -520,18 +572,23 @@  struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
 		return ERR_PTR(-EINVAL);
 	}
 
-	pll->base = base;
+	memcpy(&pll->gnrl_ctl, iomap, sizeof(*iomap));
+	pll->gnrl_ctl.offset += GNRL_CTL;
+	memcpy(&pll->div_ctl0, iomap, sizeof(*iomap));
+	pll->div_ctl0.offset += DIV_CTL0;
+	memcpy(&pll->div_ctl1, iomap, sizeof(*iomap));
+	pll->div_ctl1.offset += DIV_CTL1;
+
 	pll->hw.init = &init;
 	pll->type = pll_clk->type;
 	pll->rate_table = pll_clk->rate_table;
 	pll->rate_count = pll_clk->rate_count;
 
-	val = readl_relaxed(pll->base + GNRL_CTL);
+	val = imx_pll14xx_readl(&pll->gnrl_ctl);
 	val &= ~BYPASS_MASK;
-	writel_relaxed(val, pll->base + GNRL_CTL);
+	imx_pll14xx_writel(val, &pll->gnrl_ctl);
 
 	hw = &pll->hw;
-
 	ret = clk_hw_register(dev, hw);
 	if (ret) {
 		pr_err("failed to register pll %s %d\n", name, ret);
@@ -541,4 +598,105 @@  struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
 
 	return hw;
 }
+
+struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
+				const char *parent_name, void __iomem *base,
+				const struct imx_pll14xx_clk *pll_clk)
+{
+	struct imx_clk_iomap _base = {};
+
+	_base.mem = base;
+	return imx_clk_hw_register_pll14xx(dev, name, parent_name, &_base,
+					   pll_clk);
+}
 EXPORT_SYMBOL_GPL(imx_dev_clk_hw_pll14xx);
+
+static struct clk_hw *_of_imx_pll14xx_clk_setup(struct device_node *node)
+{
+	struct clk_hw *hw;
+	struct imx_clk_iomap io;
+	struct device_node *parent_node;
+	const char *name, *parent_name;
+	const struct imx_pll14xx_clk *pll_clk;
+	const char *pll_type;
+	u32 val;
+	int ret;
+
+	parent_node = of_get_parent(node);
+	if (!parent_node) {
+		pr_err("%s: %pOFn must have 1 parent\n", __func__, node);
+		return ERR_PTR(-ENODEV);
+	}
+
+	io.mem = 0;
+	io.regmap = syscon_node_to_regmap(parent_node);
+	of_node_put(parent_node);
+	if (IS_ERR(io.regmap)) {
+		pr_err("%s: missing regmap for %pOFn\n", __func__, node);
+		return ERR_CAST(io.regmap);
+	}
+
+	if (of_property_read_u32(node, "fsl,regmap-offset", &val)) {
+		pr_err("%s: missing regmap offset for %pOFn\n", __func__,
+		       node);
+		return ERR_PTR(-EIO);
+	}
+
+	io.offset = val;
+	if (of_clk_get_parent_count(node) != 1) {
+		pr_err("%s: %pOFn must have 1 parent clock\n", __func__, node);
+		return ERR_PTR(-EIO);
+	}
+
+	parent_name = of_clk_get_parent_name(node, 0);
+	name = imx_dt_clk_name(node);
+	if (of_property_read_string(node, "fsl,type", &pll_type)) {
+		pr_err("%s: missing 'fsl,type' for %pOFn\n", __func__,
+		       node);
+		return ERR_PTR(-EIO);
+	}
+
+	if (!strcmp(pll_type, "1443x")) {
+		if (of_property_read_bool(node, "fsl,get-rate-nocache"))
+			pll_clk = &imx_1443x_dram_pll;
+		else
+			pll_clk = &imx_1443x_pll;
+	} else if (!strcmp(pll_type, "1416x")) {
+		pll_clk = &imx_1416x_pll;
+	} else {
+		pr_err("%s: failed to get pll clock for %pOFn\n", __func__,
+		       node);
+		return ERR_PTR(-EIO);
+	}
+
+	hw = imx_clk_hw_register_pll14xx(NULL, name, parent_name, &io, pll_clk);
+	if (IS_ERR(hw)) {
+		/*
+		 * Clear OF_POPULATED flag so that clock registration can be
+		 * attempted again from probe function.
+		 */
+		of_node_clear_flag(node, OF_POPULATED);
+		return ERR_CAST(hw);
+	}
+
+	ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, hw);
+	if (ret) {
+		imx_clk_hw_unregister_pll14xx(hw);
+		return ERR_PTR(ret);
+	}
+
+	pr_debug("%s: name: %s, parent: %s, offset: 0x%x, pll_type: %s\n",
+		 __func__, name, parent_name, io.offset, pll_type);
+
+	return hw;
+}
+
+/**
+ * of_imx_pll14xx_clk_setup() - Setup function for imx pll14xx clock
+ * @node:	device node for the clock
+ */
+void __init of_imx_pll14xx_clk_setup(struct device_node *node)
+{
+	_of_imx_pll14xx_clk_setup(node);
+}
+CLK_OF_DECLARE(fsl_pll14xx_clk, "fsl,pll14xx-clock", of_imx_pll14xx_clk_setup);