Release 4.12 drivers/clocksource/time-armada-370-xp.c
  
  
  
/*
 * Marvell Armada 370/XP SoC timer handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * Timer 0 is used as free-running clocksource, while timer 1 is
 * used as clock_event_device.
 *
 * ---
 * Clocksource driver for Armada 370 and Armada XP SoC.
 * This driver implements one compatible string for each SoC, given
 * each has its own characteristics:
 *
 *   * Armada 370 has no 25 MHz fixed timer.
 *
 *   * Armada XP cannot work properly without such 25 MHz fixed timer as
 *     doing otherwise leads to using a clocksource whose frequency varies
 *     when doing cpufreq frequency changes.
 *
 * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/percpu.h>
#include <linux/syscore_ops.h>
#include <asm/delay.h>
/*
 * Timer block registers.
 */
#define TIMER_CTRL_OFF		0x0000
#define  TIMER0_EN		 BIT(0)
#define  TIMER0_RELOAD_EN	 BIT(1)
#define  TIMER0_25MHZ            BIT(11)
#define  TIMER0_DIV(div)         ((div) << 19)
#define  TIMER1_EN		 BIT(2)
#define  TIMER1_RELOAD_EN	 BIT(3)
#define  TIMER1_25MHZ            BIT(12)
#define  TIMER1_DIV(div)         ((div) << 22)
#define TIMER_EVENTS_STATUS	0x0004
#define  TIMER0_CLR_MASK         (~0x1)
#define  TIMER1_CLR_MASK         (~0x100)
#define TIMER0_RELOAD_OFF	0x0010
#define TIMER0_VAL_OFF		0x0014
#define TIMER1_RELOAD_OFF	0x0018
#define TIMER1_VAL_OFF		0x001c
#define LCL_TIMER_EVENTS_STATUS	0x0028
/* Global timers are connected to the coherency fabric clock, and the
   below divider reduces their incrementing frequency. */
#define TIMER_DIVIDER_SHIFT     5
#define TIMER_DIVIDER           (1 << TIMER_DIVIDER_SHIFT)
/*
 * SoC-specific data.
 */
static void __iomem *timer_base, *local_base;
static unsigned int timer_clk;
static bool timer25Mhz = true;
static u32 enable_mask;
/*
 * Number of timer ticks per jiffy.
 */
static u32 ticks_per_jiffy;
static struct clock_event_device __percpu *armada_370_xp_evt;
/*
 * Read-modify-write helper for the per-CPU (local) timer control
 * register: clear the bits in @clr, then set the bits in @set.
 */
static void local_timer_ctrl_clrset(u32 clr, u32 set)
{
	u32 val;

	val = readl(local_base + TIMER_CTRL_OFF);
	val &= ~clr;
	val |= set;
	writel(val, local_base + TIMER_CTRL_OFF);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ezequiel García | 33 | 100.00% | 1 | 100.00% | 
| Total | 33 | 100.00% | 1 | 100.00% | 
/*
 * sched_clock read callback. Timer 0 is a down-counter, so invert the
 * raw value to present a monotonically increasing clock.
 */
static u64 notrace armada_370_xp_read_sched_clock(void)
{
	u32 cnt = readl(timer_base + TIMER0_VAL_OFF);

	return ~cnt;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 17 | 94.44% | 1 | 50.00% | 
| Stephen Boyd | 1 | 5.56% | 1 | 50.00% | 
| Total | 18 | 100.00% | 2 | 100.00% | 
/*
 * Clockevent handling.
 */
/*
 * Program a one-shot event @delta ticks into the future on the
 * per-CPU (local) timer 0.
 */
static int
armada_370_xp_clkevt_next_event(unsigned long delta,
				struct clock_event_device *dev)
{
	/*
         * Clear any pending clockevent timer interrupt.
         */
	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
	/*
         * Setup new clockevent timer value.
         */
	writel(delta, local_base + TIMER0_VAL_OFF);
	/*
         * Enable the timer in one-shot mode (auto-reload cleared).
         */
	local_timer_ctrl_clrset(TIMER0_RELOAD_EN, enable_mask);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 43 | 93.48% | 2 | 50.00% | 
| Ezequiel García | 3 | 6.52% | 2 | 50.00% | 
| Total | 46 | 100.00% | 4 | 100.00% | 
/*
 * Stop the per-CPU timer. Also used as the set_state_oneshot and
 * tick_resume callback, since those only need the timer stopped.
 */
static int armada_370_xp_clkevt_shutdown(struct clock_event_device *evt)
{
	/*
         * Disable timer.
         */
	local_timer_ctrl_clrset(TIMER0_EN, 0);
	/*
         * ACK pending timer interrupt.
         */
	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Viresh Kumar | 25 | 78.12% | 1 | 50.00% | 
| Gregory CLEMENT | 7 | 21.88% | 1 | 50.00% | 
| Total | 32 | 100.00% | 2 | 100.00% | 
/*
 * Program the per-CPU timer for periodic ticks at HZ frequency.
 */
static int armada_370_xp_clkevt_set_periodic(struct clock_event_device *evt)
{
	/*
         * Setup timer to fire at 1/HZ intervals: load both the current
         * count and the auto-reload value.
         */
	writel(ticks_per_jiffy - 1, local_base + TIMER0_RELOAD_OFF);
	writel(ticks_per_jiffy - 1, local_base + TIMER0_VAL_OFF);
	/*
         * Enable timer with auto-reload.
         */
	local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | enable_mask);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 28 | 59.57% | 2 | 40.00% | 
| Viresh Kumar | 13 | 27.66% | 1 | 20.00% | 
| Ezequiel García | 6 | 12.77% | 2 | 40.00% | 
| Total | 47 | 100.00% | 5 | 100.00% | 
/* IRQ number shared by all per-CPU clockevent devices. */
static int armada_370_xp_clkevt_irq;

/*
 * Per-CPU timer interrupt handler: ACK the timer interrupt, then
 * invoke the registered clockevent handler.
 */
static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	writel(TIMER0_CLR_MASK, local_base + LCL_TIMER_EVENTS_STATUS);
	evt->event_handler(evt);
	return IRQ_HANDLED;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 40 | 100.00% | 2 | 100.00% | 
| Total | 40 | 100.00% | 2 | 100.00% | 
/*
 * Setup the local clock events for a CPU.
 */
/*
 * Setup the local clock events for a CPU (CPU-hotplug "starting"
 * callback, also run on the boot CPU during init): select the 25 MHz
 * input when available, fill in and register the per-CPU
 * clock_event_device, and enable its percpu interrupt.
 */
static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
	u32 clr = 0, set = 0;

	if (timer25Mhz)
		set = TIMER0_25MHZ;
	else
		clr = TIMER0_25MHZ;
	local_timer_ctrl_clrset(clr, set);

	/*
	 * Note: previously some of these assignments were separated
	 * by comma operators rather than semicolons; normalized here.
	 */
	evt->name		= "armada_370_xp_per_cpu_tick";
	evt->features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC;
	evt->shift		= 32;
	evt->rating		= 300;
	evt->set_next_event	= armada_370_xp_clkevt_next_event;
	evt->set_state_shutdown	= armada_370_xp_clkevt_shutdown;
	evt->set_state_periodic	= armada_370_xp_clkevt_set_periodic;
	evt->set_state_oneshot	= armada_370_xp_clkevt_shutdown;
	evt->tick_resume	= armada_370_xp_clkevt_shutdown;
	evt->irq		= armada_370_xp_clkevt_irq;
	evt->cpumask		= cpumask_of(cpu);

	clockevents_config_and_register(evt, timer_clk, 1, 0xfffffffe);
	enable_percpu_irq(evt->irq, 0);

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 80 | 55.17% | 2 | 33.33% | 
| Viresh Kumar | 21 | 14.48% | 1 | 16.67% | 
| Ezequiel García | 16 | 11.03% | 1 | 16.67% | 
| Richard Cochran | 14 | 9.66% | 1 | 16.67% | 
| Stephen Boyd | 14 | 9.66% | 1 | 16.67% | 
| Total | 145 | 100.00% | 6 | 100.00% | 
/*
 * Tear down a CPU's local clock events (CPU-hotplug "dying"
 * callback): shut the timer down via the clockevent state callback
 * and disable its percpu interrupt.
 */
static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
{
	struct clock_event_device *clkevt = per_cpu_ptr(armada_370_xp_evt, cpu);

	clkevt->set_state_shutdown(clkevt);
	disable_percpu_irq(clkevt->irq);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 19 | 48.72% | 1 | 25.00% | 
| Richard Cochran | 16 | 41.03% | 1 | 25.00% | 
| Stephen Boyd | 3 | 7.69% | 1 | 25.00% | 
| Viresh Kumar | 1 | 2.56% | 1 | 25.00% | 
| Total | 39 | 100.00% | 4 | 100.00% | 
/* Control register values saved across a system suspend/resume cycle. */
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;

/*
 * syscore suspend: snapshot the global and local timer control
 * registers so armada_370_xp_timer_resume() can restore them.
 */
static int armada_370_xp_timer_suspend(void)
{
	timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
	timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Petazzoni | 29 | 100.00% | 1 | 100.00% | 
| Total | 29 | 100.00% | 1 | 100.00% | 
/*
 * syscore resume: rearm the free-running clocksource timer 0, then
 * restore the control registers saved at suspend time (restoring the
 * control registers last re-enables the timers).
 */
static void armada_370_xp_timer_resume(void)
{
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
	writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF);
	writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Thomas Petazzoni | 44 | 100.00% | 1 | 100.00% | 
| Total | 44 | 100.00% | 1 | 100.00% | 
/* System suspend/resume hooks, registered at the end of common init. */
static struct syscore_ops armada_370_xp_timer_syscore_ops = {
	.suspend	= armada_370_xp_timer_suspend,
	.resume		= armada_370_xp_timer_resume,
};
static unsigned long armada_370_delay_timer_read(void)
{
	return ~readl(timer_base + TIMER0_VAL_OFF);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Russell King | 18 | 100.00% | 1 | 100.00% | 
| Total | 18 | 100.00% | 1 | 100.00% | 
/* Timer-based udelay() implementation; .freq is filled in at init time. */
static struct delay_timer armada_370_delay_timer = {
	.read_current_timer = armada_370_delay_timer_read,
};
/*
 * Common initialization shared by all SoC entry points. Expects
 * timer_clk (and timer25Mhz) to have been set up by the caller.
 *
 * Maps the timer registers, sets up global timer 0 as free-running
 * clocksource / sched_clock / delay timer, sets up local (per-CPU)
 * timer 0 as clockevent device via a CPU-hotplug state, and registers
 * the suspend/resume syscore hooks.
 */
static int __init armada_370_xp_timer_common_init(struct device_node *np)
{
	u32 clr = 0, set = 0;
	int res;

	timer_base = of_iomap(np, 0);
	if (!timer_base) {
		pr_err("Failed to iomap\n");
		return -ENXIO;
	}

	local_base = of_iomap(np, 1);
	if (!local_base) {
		pr_err("Failed to iomap\n");
		return -ENXIO;
	}

	if (timer25Mhz) {
		set = TIMER0_25MHZ;
		enable_mask = TIMER0_EN;
	} else {
		/* No fixed 25 MHz input: divide the source clock down. */
		clr = TIMER0_25MHZ;
		enable_mask = TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT);
	}
	atomic_io_modify(timer_base + TIMER_CTRL_OFF, clr | set, set);
	local_timer_ctrl_clrset(clr, set);

	/*
	 * We use timer 0 as clocksource, and private(local) timer 0
	 * for clockevents
	 */
	armada_370_xp_clkevt_irq = irq_of_parse_and_map(np, 4);

	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;

	/*
	 * Setup free-running clocksource timer (interrupts
	 * disabled).
	 */
	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
	atomic_io_modify(timer_base + TIMER_CTRL_OFF,
		TIMER0_RELOAD_EN | enable_mask,
		TIMER0_RELOAD_EN | enable_mask);

	armada_370_delay_timer.freq = timer_clk;
	register_current_timer_delay(&armada_370_delay_timer);

	/*
	 * Set scale and timer for sched_clock.
	 */
	sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);

	res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
				    "armada_370_xp_clocksource",
				    timer_clk, 300, 32, clocksource_mmio_readl_down);
	if (res) {
		pr_err("Failed to initialize clocksource mmio\n");
		return res;
	}

	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
	if (!armada_370_xp_evt)
		return -ENOMEM;

	/*
	 * Setup clockevent timer (interrupt-driven).
	 */
	res = request_percpu_irq(armada_370_xp_clkevt_irq,
				armada_370_xp_timer_interrupt,
				"armada_370_xp_per_cpu_tick",
				armada_370_xp_evt);
	if (res) {
		pr_err("Failed to request percpu irq\n");
		return res;
	}

	/*
	 * The "starting" callback immediately configures the timer on
	 * the boot CPU, and later on each CPU as it comes online.
	 */
	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
				"clockevents/armada:starting",
				armada_370_xp_timer_starting_cpu,
				armada_370_xp_timer_dying_cpu);
	if (res) {
		pr_err("Failed to setup hotplug state and timer\n");
		return res;
	}

	register_syscore_ops(&armada_370_xp_timer_syscore_ops);

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 140 | 44.16% | 2 | 13.33% | 
| Daniel Lezcano | 78 | 24.61% | 1 | 6.67% | 
| Ezequiel García | 62 | 19.56% | 6 | 40.00% | 
| Russell King | 12 | 3.79% | 1 | 6.67% | 
| Richard Cochran | 7 | 2.21% | 1 | 6.67% | 
| Stephen Boyd | 6 | 1.89% | 1 | 6.67% | 
| Thomas Petazzoni | 6 | 1.89% | 1 | 6.67% | 
| Rafał Miłecki | 5 | 1.58% | 1 | 6.67% | 
| Thomas Gleixner | 1 | 0.32% | 1 | 6.67% | 
| Total | 317 | 100.00% | 15 | 100.00% | 
/*
 * Armada XP entry point: the mandatory 25 MHz fixed clock drives the
 * timers directly (see header comment), so no divider is needed.
 */
static int __init armada_xp_timer_init(struct device_node *np)
{
	struct clk *clk = of_clk_get_by_name(np, "fixed");
	int ret;

	if (IS_ERR(clk)) {
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		/* Don't leak the clock reference on failure. */
		clk_put(clk);
		return ret;
	}

	timer_clk = clk_get_rate(clk);

	return armada_370_xp_timer_common_init(np);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ezequiel García | 37 | 50.00% | 3 | 50.00% | 
| Daniel Lezcano | 28 | 37.84% | 1 | 16.67% | 
| Gregory CLEMENT | 8 | 10.81% | 1 | 16.67% | 
| Rafał Miłecki | 1 | 1.35% | 1 | 16.67% | 
| Total | 74 | 100.00% | 6 | 100.00% | 
/* Armada XP: uses the 25 MHz fixed timer (see header comment). */
CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
		       armada_xp_timer_init);
/*
 * Armada 375 entry point: prefer the "fixed" 25 MHz clock, but fall
 * back to the first clock (divided down) to retain devicetree
 * backwards compatibility.
 */
static int __init armada_375_timer_init(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get_by_name(np, "fixed");
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret) {
			/* Don't leak the clock reference on failure. */
			clk_put(clk);
			return ret;
		}
		timer_clk = clk_get_rate(clk);
	} else {
		/*
                 * This fallback is required in order to retain proper
                 * devicetree backwards compatibility.
                 */
		clk = of_clk_get(np, 0);

		/* Must have at least a clock */
		if (IS_ERR(clk)) {
			pr_err("Failed to get clock\n");
			return PTR_ERR(clk);
		}

		ret = clk_prepare_enable(clk);
		if (ret) {
			/* Don't leak the clock reference on failure. */
			clk_put(clk);
			return ret;
		}

		timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
		timer25Mhz = false;
	}

	return armada_370_xp_timer_common_init(np);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ezequiel García | 89 | 70.08% | 1 | 33.33% | 
| Daniel Lezcano | 37 | 29.13% | 1 | 33.33% | 
| Rafał Miłecki | 1 | 0.79% | 1 | 33.33% | 
| Total | 127 | 100.00% | 3 | 100.00% | 
/* Armada 375: 25 MHz fixed clock when present, divided clock otherwise. */
CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
		       armada_375_timer_init);
/*
 * Armada 370 entry point: no 25 MHz fixed timer exists (see header
 * comment), so always use the first clock divided by TIMER_DIVIDER.
 */
static int __init armada_370_timer_init(struct device_node *np)
{
	struct clk *clk;
	int ret;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk)) {
		pr_err("Failed to get clock\n");
		return PTR_ERR(clk);
	}

	ret = clk_prepare_enable(clk);
	if (ret) {
		/* Don't leak the clock reference on failure. */
		clk_put(clk);
		return ret;
	}

	timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
	timer25Mhz = false;

	return armada_370_xp_timer_common_init(np);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Ezequiel García | 48 | 58.54% | 2 | 40.00% | 
| Daniel Lezcano | 32 | 39.02% | 1 | 20.00% | 
| Gregory CLEMENT | 1 | 1.22% | 1 | 20.00% | 
| Rafał Miłecki | 1 | 1.22% | 1 | 20.00% | 
| Total | 82 | 100.00% | 5 | 100.00% | 
/* Armada 370: no 25 MHz fixed timer; divided source clock only. */
CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
		       armada_370_timer_init);
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Gregory CLEMENT | 538 | 40.39% | 3 | 12.00% | 
| Ezequiel García | 329 | 24.70% | 10 | 40.00% | 
| Daniel Lezcano | 178 | 13.36% | 2 | 8.00% | 
| Thomas Petazzoni | 104 | 7.81% | 1 | 4.00% | 
| Viresh Kumar | 60 | 4.50% | 1 | 4.00% | 
| Russell King | 45 | 3.38% | 1 | 4.00% | 
| Richard Cochran | 37 | 2.78% | 1 | 4.00% | 
| Stephen Boyd | 31 | 2.33% | 3 | 12.00% | 
| Rafał Miłecki | 8 | 0.60% | 1 | 4.00% | 
| Thomas Gleixner | 1 | 0.08% | 1 | 4.00% | 
| Ben Dooks | 1 | 0.08% | 1 | 4.00% | 
| Total | 1332 | 100.00% | 25 | 100.00% | 
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.