Release 4.15 drivers/clk/samsung/clk.c
  
  
  
/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file includes utility functions to register clocks to common
 * clock framework for Samsung platforms.
 */
#include <linux/slab.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
#include "clk.h"
static LIST_HEAD(clock_reg_cache_list);
void samsung_clk_save(void __iomem *base,
				    struct samsung_clk_reg_dump *rd,
				    unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		rd->value = readl(base + rd->offset);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Tomasz Figa | 45 | 100.00% | 1 | 100.00% | 
| Total | 45 | 100.00% | 1 | 100.00% | 
void samsung_clk_restore(void __iomem *base,
				      const struct samsung_clk_reg_dump *rd,
				      unsigned int num_regs)
{
	for (; num_regs > 0; --num_regs, ++rd)
		writel(rd->value, base + rd->offset);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Tomasz Figa | 46 | 100.00% | 1 | 100.00% | 
| Total | 46 | 100.00% | 1 | 100.00% | 
struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
						const unsigned long *rdump,
						unsigned long nr_rdump)
{
	struct samsung_clk_reg_dump *rd;
	unsigned int i;
	rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
	if (!rd)
		return NULL;
	for (i = 0; i < nr_rdump; ++i)
		rd[i].offset = rdump[i];
	return rd;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Tomasz Figa | 77 | 100.00% | 2 | 100.00% | 
| Total | 77 | 100.00% | 2 | 100.00% | 
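Taken together, the three helpers above let a clock driver snapshot a set of clock registers before a low-power transition and replay them afterwards. A minimal sketch, assuming a hypothetical controller base address reg_base and a made-up offset list my_clk_regs[]:

/* Illustrative only: offsets of the registers that must survive suspend. */
static const unsigned long my_clk_regs[] = {
	0x0200,		/* e.g. a clock source (mux) register */
	0x0500,		/* e.g. a clock divider register */
	0x0800,		/* e.g. a clock gate register */
};

static struct samsung_clk_reg_dump *my_dump;

static int my_clk_prepare_suspend(void __iomem *reg_base)
{
	/* One dump entry is allocated per offset. */
	my_dump = samsung_clk_alloc_reg_dump(my_clk_regs,
					     ARRAY_SIZE(my_clk_regs));
	if (!my_dump)
		return -ENOMEM;

	/* Latch the current register contents. */
	samsung_clk_save(reg_base, my_dump, ARRAY_SIZE(my_clk_regs));
	return 0;
}

static void my_clk_finish_resume(void __iomem *reg_base)
{
	/* Write the saved values back after wake-up. */
	samsung_clk_restore(reg_base, my_dump, ARRAY_SIZE(my_clk_regs));
	kfree(my_dump);
}

In practice drivers rarely open-code this pair; they normally hand the offset list to samsung_clk_sleep_init() further down, which hooks the save/restore into syscore suspend/resume automatically.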
/* setup the essentials required to support clock lookup using ccf */
struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct samsung_clk_provider *ctx;
	int i;
	ctx = kzalloc(sizeof(struct samsung_clk_provider) +
		      sizeof(*ctx->clk_data.hws) * nr_clks, GFP_KERNEL);
	if (!ctx)
		panic("could not allocate clock provider context.\n");
	for (i = 0; i < nr_clks; ++i)
		ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
	ctx->reg_base = base;
	ctx->clk_data.num = nr_clks;
	spin_lock_init(&ctx->lock);
	return ctx;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Rahul Sharma | 33 | 27.97% | 1 | 16.67% | 
| Tomasz Figa | 25 | 21.19% | 1 | 16.67% | 
| Heiko Stübner | 21 | 17.80% | 1 | 16.67% | 
| Thomas Abraham | 19 | 16.10% | 1 | 16.67% | 
| Marek Szyprowski | 16 | 13.56% | 1 | 16.67% | 
| Sylwester Nawrocki | 4 | 3.39% | 1 | 16.67% | 
| Total | 118 | 100.00% | 6 | 100.00% | 
void __init samsung_clk_of_add_provider(struct device_node *np,
				struct samsung_clk_provider *ctx)
{
	if (np) {
		if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
					&ctx->clk_data))
			panic("could not register clk provider\n");
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Sylwester Nawrocki | 20 | 48.78% | 1 | 20.00% | 
| Rahul Sharma | 11 | 26.83% | 1 | 20.00% | 
| Heiko Stübner | 7 | 17.07% | 1 | 20.00% | 
| Marek Szyprowski | 2 | 4.88% | 1 | 20.00% | 
| Thomas Abraham | 1 | 2.44% | 1 | 20.00% | 
| Total | 41 | 100.00% | 5 | 100.00% | 
/* add a clock instance to the clock lookup table used for dt based lookup */
void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
			    struct clk_hw *clk_hw, unsigned int id)
{
	if (id)
		ctx->clk_data.hws[id] = clk_hw;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 21 | 61.76% | 1 | 33.33% | 
| Rahul Sharma | 9 | 26.47% | 1 | 33.33% | 
| Marek Szyprowski | 4 | 11.76% | 1 | 33.33% | 
| Total | 34 | 100.00% | 3 | 100.00% | 
/* register a list of aliases */
void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
				const struct samsung_clock_alias *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx, ret;
	for (idx = 0; idx < nr_clk; idx++, list++) {
		if (!list->id) {
			pr_err("%s: clock id missing for index %d\n", __func__,
				idx);
			continue;
		}
		clk_hw = ctx->clk_data.hws[list->id];
		if (!clk_hw) {
			pr_err("%s: failed to find clock %d\n", __func__,
				list->id);
			continue;
		}
		ret = clk_hw_register_clkdev(clk_hw, list->alias,
					     list->dev_name);
		if (ret)
			pr_err("%s: failed to register lookup %s\n",
					__func__, list->alias);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Heiko Stübner | 114 | 87.02% | 1 | 25.00% | 
| Rahul Sharma | 9 | 6.87% | 1 | 25.00% | 
| Marek Szyprowski | 7 | 5.34% | 1 | 25.00% | 
| Uwe Kleine-König | 1 | 0.76% | 1 | 25.00% | 
| Total | 131 | 100.00% | 4 | 100.00% | 
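For illustration, an alias table is simply an array of struct samsung_clock_alias entries whose id must refer to a clock already added via samsung_clk_add_lookup(); the ids and device name below are made up:

/* Illustrative alias table; real ids come from the SoC's dt-bindings header. */
static const struct samsung_clock_alias my_aliases[] __initconst = {
	{ .id = 2, .dev_name = "12340000.serial", .alias = "uart" },
	{ .id = 3, .dev_name = NULL,              .alias = "sclk_mmc0" },
};

static void __init my_register_aliases(struct samsung_clk_provider *ctx)
{
	/* Creates clkdev lookups so clk_get(dev, "uart") and friends resolve. */
	samsung_clk_register_alias(ctx, my_aliases, ARRAY_SIZE(my_aliases));
}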
/* register a list of fixed clocks */
void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx,
		const struct samsung_fixed_rate_clock *list,
		unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx, ret;
	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_fixed_rate(ctx->dev, list->name,
			list->parent_name, list->flags, list->fixed_rate);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}
		samsung_clk_add_lookup(ctx, clk_hw, list->id);
		/*
		 * Unconditionally add a clock lookup for the fixed rate clocks.
		 * There are not many of these on any of Samsung platforms.
		 */
		ret = clk_hw_register_clkdev(clk_hw, list->name, NULL);
		if (ret)
			pr_err("%s: failed to register clock lookup for %s",
				__func__, list->name);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 117 | 86.03% | 1 | 20.00% | 
| Marek Szyprowski | 11 | 8.09% | 2 | 40.00% | 
| Rahul Sharma | 7 | 5.15% | 1 | 20.00% | 
| Uwe Kleine-König | 1 | 0.74% | 1 | 20.00% | 
| Total | 136 | 100.00% | 5 | 100.00% | 
/* register a list of fixed factor clocks */
void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx,
		const struct samsung_fixed_factor_clock *list, unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;
	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
			list->parent_name, list->flags, list->mult, list->div);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}
		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 92 | 84.40% | 1 | 20.00% | 
| Marek Szyprowski | 9 | 8.26% | 2 | 40.00% | 
| Rahul Sharma | 7 | 6.42% | 1 | 20.00% | 
| Uwe Kleine-König | 1 | 0.92% | 1 | 20.00% | 
| Total | 109 | 100.00% | 5 | 100.00% | 
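As a usage sketch, a fixed-factor entry describes a clock that always runs at the parent rate times mult divided by div; the name, parent and id below are illustrative:

/* Illustrative fixed-factor clock: "dout_half" is always fin_pll / 2. */
static const struct samsung_fixed_factor_clock my_ffactor_clks[] __initconst = {
	{ .id = 4, .name = "dout_half", .parent_name = "fin_pll",
	  .mult = 1, .div = 2, .flags = 0, },
};

/* Registered from the SoC clock init code with
 * samsung_clk_register_fixed_factor(ctx, my_ffactor_clks,
 *				     ARRAY_SIZE(my_ffactor_clks));
 */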
/* register a list of mux clocks */
void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
				const struct samsung_mux_clock *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;
	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_mux(ctx->dev, list->name,
			list->parent_names, list->num_parents, list->flags,
			ctx->reg_base + list->offset,
			list->shift, list->width, list->mux_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}
		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 109 | 83.85% | 1 | 20.00% | 
| Rahul Sharma | 11 | 8.46% | 1 | 20.00% | 
| Marek Szyprowski | 9 | 6.92% | 2 | 40.00% | 
| Uwe Kleine-König | 1 | 0.77% | 1 | 20.00% | 
| Total | 130 | 100.00% | 5 | 100.00% | 
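A mux table, for illustration, lists the candidate parent names and the bit-field that selects between them; the id, offset and parent names below are made up, and registration happens via samsung_clk_register_mux(ctx, ...) from the SoC init, or through a samsung_cmu_info as sketched after samsung_cmu_register_one() below:

/* Illustrative parents: external oscillator vs. a PLL output. */
static const char * const mout_uart_p[] = { "fin_pll", "fout_mpll" };

static const struct samsung_mux_clock my_mux_clks[] __initconst = {
	{
		.id		= 5,			/* illustrative clock id */
		.name		= "mout_uart",
		.parent_names	= mout_uart_p,
		.num_parents	= ARRAY_SIZE(mout_uart_p),
		.offset		= 0x0200,		/* source-select register offset */
		.shift		= 0,			/* bit position of the select field */
		.width		= 1,			/* 2 parents need a 1-bit field */
	},
};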
/* register a list of div clocks */
void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
				const struct samsung_div_clock *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;
	for (idx = 0; idx < nr_clk; idx++, list++) {
		if (list->table)
			clk_hw = clk_hw_register_divider_table(ctx->dev,
				list->name, list->parent_name, list->flags,
				ctx->reg_base + list->offset,
				list->shift, list->width, list->div_flags,
				list->table, &ctx->lock);
		else
			clk_hw = clk_hw_register_divider(ctx->dev, list->name,
				list->parent_name, list->flags,
				ctx->reg_base + list->offset, list->shift,
				list->width, list->div_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}
		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 105 | 57.38% | 1 | 16.67% | 
| Heiko Stübner | 48 | 26.23% | 1 | 16.67% | 
| Rahul Sharma | 15 | 8.20% | 1 | 16.67% | 
| Marek Szyprowski | 14 | 7.65% | 2 | 33.33% | 
| Uwe Kleine-König | 1 | 0.55% | 1 | 16.67% | 
| Total | 183 | 100.00% | 6 | 100.00% | 
/* register a list of gate clocks */
void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
				const struct samsung_gate_clock *list,
				unsigned int nr_clk)
{
	struct clk_hw *clk_hw;
	unsigned int idx;
	for (idx = 0; idx < nr_clk; idx++, list++) {
		clk_hw = clk_hw_register_gate(ctx->dev, list->name, list->parent_name,
				list->flags, ctx->reg_base + list->offset,
				list->bit_idx, list->gate_flags, &ctx->lock);
		if (IS_ERR(clk_hw)) {
			pr_err("%s: failed to register clock %s\n", __func__,
				list->name);
			continue;
		}
		samsung_clk_add_lookup(ctx, clk_hw, list->id);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 101 | 82.79% | 1 | 20.00% | 
| Rahul Sharma | 11 | 9.02% | 1 | 20.00% | 
| Marek Szyprowski | 9 | 7.38% | 2 | 40.00% | 
| Uwe Kleine-König | 1 | 0.82% | 1 | 20.00% | 
| Total | 122 | 100.00% | 5 | 100.00% | 
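Continuing the illustrative tables above, a divider and a gate can be chained behind the mux; the field values are again made up, and the register address is always ctx->reg_base plus the per-clock offset:

static const struct samsung_div_clock my_div_clks[] __initconst = {
	{
		.id		= 6,			/* illustrative clock id */
		.name		= "dout_uart",
		.parent_name	= "mout_uart",
		.offset		= 0x0500,		/* divider register offset */
		.shift		= 0,
		.width		= 4,			/* 4-bit divider field */
	},
};

static const struct samsung_gate_clock my_gate_clks[] __initconst = {
	{
		.id		= 7,			/* illustrative clock id */
		.name		= "uart0",
		.parent_name	= "dout_uart",
		.offset		= 0x0800,		/* gate register offset */
		.bit_idx	= 0,			/* enable bit within that register */
		.flags		= CLK_SET_RATE_PARENT,
	},
};

Both tables are handed to samsung_clk_register_div() and samsung_clk_register_gate() respectively, or referenced from a samsung_cmu_info as shown further down.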
/*
 * obtain the clock speed of all external fixed clock sources from device
 * tree and register it
 */
void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
			struct samsung_fixed_rate_clock *fixed_rate_clk,
			unsigned int nr_fixed_rate_clk,
			const struct of_device_id *clk_matches)
{
	const struct of_device_id *match;
	struct device_node *clk_np;
	u32 freq;
	for_each_matching_node_and_match(clk_np, clk_matches, &match) {
		if (of_property_read_u32(clk_np, "clock-frequency", &freq))
			continue;
		fixed_rate_clk[(unsigned long)match->data].fixed_rate = freq;
	}
	samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 74 | 85.06% | 1 | 25.00% | 
| Rahul Sharma | 10 | 11.49% | 1 | 25.00% | 
| Pankaj Dubey | 2 | 2.30% | 1 | 25.00% | 
| Krzysztof Kozlowski | 1 | 1.15% | 1 | 25.00% | 
| Total | 87 | 100.00% | 4 | 100.00% | 
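As a usage sketch, the of_device_id table maps each external-clock DT node to an index (via match->data) into the fixed-rate array, and the rate itself is taken from the node's "clock-frequency" property; the compatible string and ids here are illustrative:

/* Index 0 in my_ext_clks[]; the rate below is a default and is overwritten
 * from the device tree when "clock-frequency" is present.
 */
static struct samsung_fixed_rate_clock my_ext_clks[] __initdata = {
	{ .id = 1, .name = "fin_pll", .fixed_rate = 24000000, },
};

static const struct of_device_id my_ext_clk_match[] __initconst = {
	{ .compatible = "vendor,my-oscillator", .data = (void *)0, },
	{ /* sentinel */ },
};

static void __init my_register_ext_clocks(struct samsung_clk_provider *ctx)
{
	samsung_clk_of_register_fixed_ext(ctx, my_ext_clks,
					  ARRAY_SIZE(my_ext_clks),
					  my_ext_clk_match);
}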
/* utility function to get the rate of a specified clock */
unsigned long _get_rate(const char *clk_name)
{
	struct clk *clk;
	clk = __clk_lookup(clk_name);
	if (!clk) {
		pr_err("%s: could not find clock %s\n", __func__, clk_name);
		return 0;
	}
	return clk_get_rate(clk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 45 | 93.75% | 1 | 50.00% | 
| Tomasz Figa | 3 | 6.25% | 1 | 50.00% | 
| Total | 48 | 100.00% | 2 | 100.00% | 
#ifdef CONFIG_PM_SLEEP
static int samsung_clk_suspend(void)
{
	struct samsung_clock_reg_cache *reg_cache;
	list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
		samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
				reg_cache->rd_num);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Naveen Krishna Chatradhi | 39 | 100.00% | 1 | 100.00% | 
| Total | 39 | 100.00% | 1 | 100.00% | 
static void samsung_clk_resume(void)
{
	struct samsung_clock_reg_cache *reg_cache;
	list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
		samsung_clk_restore(reg_cache->reg_base, reg_cache->rdump,
				reg_cache->rd_num);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Naveen Krishna Chatradhi | 36 | 100.00% | 1 | 100.00% | 
| Total | 36 | 100.00% | 1 | 100.00% | 
static struct syscore_ops samsung_clk_syscore_ops = {
	.suspend = samsung_clk_suspend,
	.resume = samsung_clk_resume,
};
void samsung_clk_sleep_init(void __iomem *reg_base,
			const unsigned long *rdump,
			unsigned long nr_rdump)
{
	struct samsung_clock_reg_cache *reg_cache;
	reg_cache = kzalloc(sizeof(struct samsung_clock_reg_cache),
			GFP_KERNEL);
	if (!reg_cache)
		panic("could not allocate register reg_cache.\n");
	reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
	if (!reg_cache->rdump)
		panic("could not allocate register dump storage.\n");
	if (list_empty(&clock_reg_cache_list))
		register_syscore_ops(&samsung_clk_syscore_ops);
	reg_cache->reg_base = reg_base;
	reg_cache->rd_num = nr_rdump;
	list_add_tail(&reg_cache->node, &clock_reg_cache_list);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Naveen Krishna Chatradhi | 108 | 100.00% | 1 | 100.00% | 
| Total | 108 | 100.00% | 1 | 100.00% | 
#else
void samsung_clk_sleep_init(void __iomem *reg_base,
			const unsigned long *rdump,
			unsigned long nr_rdump) {}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Naveen Krishna Chatradhi | 19 | 100.00% | 1 | 100.00% | 
| Total | 19 | 100.00% | 1 | 100.00% | 
#endif
/*
 * Common function which registers plls, muxes, dividers and gates
 * for each CMU. It also adds the CMU register list to the register cache.
 */
struct samsung_clk_provider * __init samsung_cmu_register_one(
			struct device_node *np,
			const struct samsung_cmu_info *cmu)
{
	void __iomem *reg_base;
	struct samsung_clk_provider *ctx;
	reg_base = of_iomap(np, 0);
	if (!reg_base) {
		panic("%s: failed to map registers\n", __func__);
		return NULL;
	}
	ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
	if (!ctx) {
		panic("%s: unable to allocate ctx\n", __func__);
		return ctx;
	}
	if (cmu->pll_clks)
		samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
			reg_base);
	if (cmu->mux_clks)
		samsung_clk_register_mux(ctx, cmu->mux_clks,
			cmu->nr_mux_clks);
	if (cmu->div_clks)
		samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
	if (cmu->gate_clks)
		samsung_clk_register_gate(ctx, cmu->gate_clks,
			cmu->nr_gate_clks);
	if (cmu->fixed_clks)
		samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
			cmu->nr_fixed_clks);
	if (cmu->fixed_factor_clks)
		samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
			cmu->nr_fixed_factor_clks);
	if (cmu->clk_regs)
		samsung_clk_sleep_init(reg_base, cmu->clk_regs,
			cmu->nr_clk_regs);
	samsung_clk_of_add_provider(np, ctx);
	return ctx;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Naveen Krishna Chatradhi | 212 | 92.17% | 2 | 40.00% | 
| Chanwoo Choi | 16 | 6.96% | 1 | 20.00% | 
| Shailendra Verma | 1 | 0.43% | 1 | 20.00% | 
| Krzysztof Kozlowski | 1 | 0.43% | 1 | 20.00% | 
| Total | 230 | 100.00% | 5 | 100.00% | 
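Tying the illustrative tables together, a typical SoC driver describes one CMU with a samsung_cmu_info and registers it from a CLK_OF_DECLARE hook; every name, id and compatible string below is hypothetical:

/* Offsets saved/restored across suspend for this CMU (illustrative). */
static const unsigned long my_cmu_clk_regs[] __initconst = {
	0x0200, 0x0500, 0x0800,
};

static const struct samsung_cmu_info my_cmu_info __initconst = {
	.mux_clks		= my_mux_clks,
	.nr_mux_clks		= ARRAY_SIZE(my_mux_clks),
	.div_clks		= my_div_clks,
	.nr_div_clks		= ARRAY_SIZE(my_div_clks),
	.gate_clks		= my_gate_clks,
	.nr_gate_clks		= ARRAY_SIZE(my_gate_clks),
	.clk_regs		= my_cmu_clk_regs,
	.nr_clk_regs		= ARRAY_SIZE(my_cmu_clk_regs),
	.nr_clk_ids		= 16,	/* illustrative: one more than the highest id */
};

static void __init my_cmu_init(struct device_node *np)
{
	/* Maps the registers, registers all tables and adds the DT provider. */
	samsung_cmu_register_one(np, &my_cmu_info);
}
CLK_OF_DECLARE(my_cmu, "vendor,my-cmu", my_cmu_init);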
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Abraham | 700 | 38.85% | 1 | 4.55% | 
| Naveen Krishna Chatradhi | 445 | 24.69% | 2 | 9.09% | 
| Tomasz Figa | 196 | 10.88% | 4 | 18.18% | 
| Heiko Stübner | 191 | 10.60% | 3 | 13.64% | 
| Rahul Sharma | 123 | 6.83% | 1 | 4.55% | 
| Marek Szyprowski | 81 | 4.50% | 2 | 9.09% | 
| Sylwester Nawrocki | 24 | 1.33% | 1 | 4.55% | 
| Chanwoo Choi | 16 | 0.89% | 1 | 4.55% | 
| Stephen Boyd | 12 | 0.67% | 1 | 4.55% | 
| Uwe Kleine-König | 6 | 0.33% | 1 | 4.55% | 
| Pankaj Dubey | 5 | 0.28% | 2 | 9.09% | 
| Krzysztof Kozlowski | 2 | 0.11% | 2 | 9.09% | 
| Shailendra Verma | 1 | 0.06% | 1 | 4.55% | 
| Total | 1802 | 100.00% | 22 | 100.00% | 