Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
J Keerthy | 1351 | 68.03% | 1 | 4.17% |
Tero Kristo | 545 | 27.44% | 9 | 37.50% |
Tony Lindgren | 34 | 1.71% | 1 | 4.17% |
Stephen Boyd | 19 | 0.96% | 3 | 12.50% |
Rob Herring | 7 | 0.35% | 1 | 4.17% |
Dinh Nguyen | 6 | 0.30% | 1 | 4.17% |
Julia Lawall | 6 | 0.30% | 2 | 8.33% |
Kees Cook | 5 | 0.25% | 1 | 4.17% |
Michael Turquette | 5 | 0.25% | 1 | 4.17% |
Dario Binacchi | 4 | 0.20% | 1 | 4.17% |
Thomas Gleixner | 2 | 0.10% | 1 | 4.17% |
Dan Carpenter | 1 | 0.05% | 1 | 4.17% |
Bhumika Goyal | 1 | 0.05% | 1 | 4.17% |
Total | 1986 | 100.00% | 24 | 100.00% |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP APLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * J Keerthy <j-keerthy@ti.com>
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>

#include "clock.h"

#define APLL_FORCE_LOCK 0x1
#define APLL_AUTO_IDLE 0x2
#define MAX_APLL_WAIT_TRIES 1000000

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

static int dra7_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r = 0, i = 0;
	struct dpll_data *ad;
	const char *clk_name;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;
	if (!ad)
		return -EINVAL;

	clk_name = clk_hw_get_name(&clk->hw);

	state <<= __ffs(ad->idlest_mask);

	/* Check if already locked */
	v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);

	if ((v & ad->idlest_mask) == state)
		return r;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_FORCE_LOCK << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	state <<= __ffs(ad->idlest_mask);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if ((v & ad->idlest_mask) == state)
			break;
		if (i > MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("clock: %s failed transition to '%s'\n",
			clk_name, (state) ? "locked" : "bypassed");
		r = -EBUSY;
	} else
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

	return r;
}

static void dra7_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u8 state = 1;
	u32 v;

	ad = clk->dpll_data;

	state <<= __ffs(ad->idlest_mask);

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= APLL_AUTO_IDLE << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

static int dra7_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad;
	u32 v;

	ad = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);
	return v == APLL_AUTO_IDLE ? 0 : 1;
}

static u8 dra7_init_apll_parent(struct clk_hw *hw)
{
	return 0;
}

static const struct clk_ops apll_ck_ops = {
	.enable		= &dra7_apll_enable,
	.disable	= &dra7_apll_disable,
	.is_enabled	= &dra7_apll_is_enabled,
	.get_parent	= &dra7_init_apll_parent,
};

static void __init omap_clk_register_apll(void *user,
					   struct device_node *node)
{
	struct clk_hw *hw = user;
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk_hw->dpll_data;
	const char *name;
	struct clk *clk;
	const struct clk_init_data *init = clk_hw->hw.init;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref for %pOFn not ready, retry\n", node);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);
	if (IS_ERR(clk)) {
		pr_debug("clk-bypass for %pOFn not ready, retry\n", node);
		if (!ti_clk_retry_init(node, hw, omap_clk_register_apll))
			return;

		goto cleanup;
	}

	ad->clk_bypass = __clk_get_hw(clk);

	name = ti_dt_clk_name(node);
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init->parent_names);
		kfree(init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(init->parent_names);
	kfree(init);
	kfree(clk_hw);
}

static void __init of_dra7_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;

	init->name = ti_dt_clk_name(node);
	init->ops = &apll_ck_ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents < 1) {
		pr_err("dra7 apll %pOFn must have parent(s)\n", node);
		goto cleanup;
	}

	parent_names = kcalloc(init->num_parents, sizeof(char *), GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	ad->idlest_mask = 0x1;
	ad->enable_mask = 0x3;

	omap_clk_register_apll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(parent_names);
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup);

#define OMAP2_EN_APLL_LOCKED	0x3
#define OMAP2_EN_APLL_STOPPED	0x0

static int omap2_apll_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ad->enable_mask;

	v >>= __ffs(ad->enable_mask);
	return v == OMAP2_EN_APLL_LOCKED ? 1 : 0;
}

static unsigned long omap2_apll_recalc(struct clk_hw *hw,
				       unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	if (omap2_apll_is_enabled(hw))
		return clk->fixed_rate;

	return 0;
}

static int omap2_apll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;
	int i = 0;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);

	while (1) {
		v = ti_clk_ll_ops->clk_readl(&ad->idlest_reg);
		if (v & ad->idlest_mask)
			break;
		if (i > MAX_APLL_WAIT_TRIES)
			break;
		i++;
		udelay(1);
	}

	if (i == MAX_APLL_WAIT_TRIES) {
		pr_warn("%s failed to transition to locked\n",
			clk_hw_get_name(&clk->hw));
		return -EBUSY;
	}

	return 0;
}

static void omap2_apll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->control_reg);
	v &= ~ad->enable_mask;
	v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

static const struct clk_ops omap2_apll_ops = {
	.enable		= &omap2_apll_enable,
	.disable	= &omap2_apll_disable,
	.is_enabled	= &omap2_apll_is_enabled,
	.recalc_rate	= &omap2_apll_recalc,
};

static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val)
{
	struct dpll_data *ad = clk->dpll_data;
	u32 v;

	v = ti_clk_ll_ops->clk_readl(&ad->autoidle_reg);
	v &= ~ad->autoidle_mask;
	v |= val << __ffs(ad->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &ad->control_reg);
}

#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP	0x3
#define OMAP2_APLL_AUTOIDLE_DISABLE		0x0

static void omap2_apll_allow_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP);
}

static void omap2_apll_deny_idle(struct clk_hw_omap *clk)
{
	omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE);
}

static const struct clk_hw_omap_ops omap2_apll_hwops = {
	.allow_idle	= &omap2_apll_allow_idle,
	.deny_idle	= &omap2_apll_deny_idle,
};

static void __init of_omap2_apll_setup(struct device_node *node)
{
	struct dpll_data *ad = NULL;
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char *name;
	struct clk *clk;
	const char *parent_name;
	u32 val;
	int ret;

	ad = kzalloc(sizeof(*ad), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);

	if (!ad || !clk_hw || !init)
		goto cleanup;

	clk_hw->dpll_data = ad;
	clk_hw->hw.init = init;
	init->ops = &omap2_apll_ops;
	name = ti_dt_clk_name(node);
	init->name = name;
	clk_hw->ops = &omap2_apll_hwops;

	init->num_parents = of_clk_get_parent_count(node);
	if (init->num_parents != 1) {
		pr_err("%pOFn must have one parent\n", node);
		goto cleanup;
	}

	parent_name = of_clk_get_parent_name(node, 0);
	init->parent_names = &parent_name;

	if (of_property_read_u32(node, "ti,clock-frequency", &val)) {
		pr_err("%pOFn missing clock-frequency\n", node);
		goto cleanup;
	}
	clk_hw->fixed_rate = val;

	if (of_property_read_u32(node, "ti,bit-shift", &val)) {
		pr_err("%pOFn missing bit-shift\n", node);
		goto cleanup;
	}

	clk_hw->enable_bit = val;
	ad->enable_mask = 0x3 << val;
	ad->autoidle_mask = 0x3 << val;

	if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
		pr_err("%pOFn missing idlest-shift\n", node);
		goto cleanup;
	}

	ad->idlest_mask = 1 << val;

	ret = ti_clk_get_reg_addr(node, 0, &ad->control_reg);
	ret |= ti_clk_get_reg_addr(node, 1, &ad->autoidle_reg);
	ret |= ti_clk_get_reg_addr(node, 2, &ad->idlest_reg);

	if (ret)
		goto cleanup;

	name = ti_dt_clk_name(node);
	clk = of_ti_clk_register_omap_hw(node, &clk_hw->hw, name);
	if (!IS_ERR(clk)) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(init);
		return;
	}

cleanup:
	kfree(ad);
	kfree(clk_hw);
	kfree(init);
}
CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock", of_omap2_apll_setup);
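
Both CLK_OF_DECLARE() entries above register the APLLs as common clock framework providers, so other drivers reach them through the standard consumer API rather than calling the clk_ops in this file directly. The following sketch is illustrative only: the "fck" connection name and example_consumer_probe() are hypothetical and not part of this file.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical consumer sketch: the "fck" connection id and this probe
 * function are assumptions for illustration. When the looked-up clock is
 * one of the APLLs registered above, clk_prepare_enable() reaches
 * dra7_apll_enable() or omap2_apll_enable() through the clk_ops tables.
 */
static int example_consumer_probe(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(dev, "clock running at %lu Hz\n", clk_get_rate(clk));

	clk_disable_unprepare(clk);

	return 0;
}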