cregit-Linux: how code gets into the kernel

Release 4.14: arch/powerpc/sysdev/mv64x60_pic.c

/*
 * Interrupt handling for Marvell mv64360/mv64460 host bridges (Discovery)
 *
 * Author: Dale Farnsworth <dale@farnsworth.org>
 *
 * 2007 (c) MontaVista, Software, Inc.  This file is licensed under
 * the terms of the GNU General Public License version 2.  This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/irq.h>

#include "mv64x60.h"

/* Interrupt Controller Interface Registers */
#define MV64X60_IC_MAIN_CAUSE_LO	0x0004
#define MV64X60_IC_MAIN_CAUSE_HI	0x000c
#define MV64X60_IC_CPU0_INTR_MASK_LO	0x0014
#define MV64X60_IC_CPU0_INTR_MASK_HI	0x001c
#define MV64X60_IC_CPU0_SELECT_CAUSE	0x0024
#define MV64X60_HIGH_GPP_GROUPS		0x0f000000
#define MV64X60_SELECT_CAUSE_HIGH	0x40000000

/* General Purpose Pins Controller Interface Registers */
#define MV64x60_GPP_INTR_CAUSE		0x0008
#define MV64x60_GPP_INTR_MASK		0x000c

#define MV64x60_LEVEL1_LOW		0
#define MV64x60_LEVEL1_HIGH		1
#define MV64x60_LEVEL1_GPP		2

#define MV64x60_LEVEL1_MASK		0x00000060
#define MV64x60_LEVEL1_OFFSET		5

#define MV64x60_LEVEL2_MASK		0x0000001f

#define MV64x60_NUM_IRQS		96

static DEFINE_SPINLOCK(mv64x60_lock);

static void __iomem *mv64x60_irq_reg_base;
static void __iomem *mv64x60_gpp_reg_base;

/*
 * Interrupt Controller Handling
 *
 * The interrupt controller handles three groups of interrupts:
 *   main low:  IRQ0-IRQ31
 *   main high: IRQ32-IRQ63
 *   gpp:       IRQ64-IRQ95
 *
 * This code handles interrupts in two levels.  Level 1 selects the
 * interrupt group, and level 2 selects an IRQ within that group.
 * Each group has its own irq_chip structure.
 */
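
/*
 * Illustration of the hwirq layout implied by the LEVEL1/LEVEL2 masks
 * above: bits 6:5 hold the level 1 group and bits 4:0 hold the bit
 * position within that group.  For example, GPP pin 6 maps to hwirq
 * (MV64x60_LEVEL1_GPP << MV64x60_LEVEL1_OFFSET) | 6, i.e.
 * (2 << 5) | 6 = 70, which falls in the gpp range IRQ64-IRQ95.
 */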


static u32 mv64x60_cached_low_mask;
static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
static u32 mv64x60_cached_gpp_mask;

static struct irq_domain *mv64x60_irq_host;

/*
 * mv64x60_chip_low functions
 */


static void mv64x60_mask_low(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_low_mask &= ~(1 << level2);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}

static void mv64x60_unmask_low(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_low_mask |= 1 << level2;
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO);
}

static struct irq_chip mv64x60_chip_low = {
	.name		= "mv64x60_low",
	.irq_mask	= mv64x60_mask_low,
	.irq_mask_ack	= mv64x60_mask_low,
	.irq_unmask	= mv64x60_unmask_low,
};

/*
 * mv64x60_chip_high functions
 */
static void mv64x60_mask_high(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_high_mask &= ~(1 << level2);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}

static void mv64x60_unmask_high(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_high_mask |= 1 << level2;
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI);
}

static struct irq_chip mv64x60_chip_high = {
	.name		= "mv64x60_high",
	.irq_mask	= mv64x60_mask_high,
	.irq_mask_ack	= mv64x60_mask_high,
	.irq_unmask	= mv64x60_unmask_high,
};

/*
 * mv64x60_chip_gpp functions
 */
static void mv64x60_mask_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask &= ~(1 << level2);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}

static void mv64x60_mask_ack_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask &= ~(1 << level2);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE,
		 ~(1 << level2));
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE);
}

static void mv64x60_unmask_gpp(struct irq_data *d)
{
	int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK;
	unsigned long flags;

	spin_lock_irqsave(&mv64x60_lock, flags);
	mv64x60_cached_gpp_mask |= 1 << level2;
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
	(void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK);
}

static struct irq_chip mv64x60_chip_gpp = {
	.name		= "mv64x60_gpp",
	.irq_mask	= mv64x60_mask_gpp,
	.irq_mask_ack	= mv64x60_mask_ack_gpp,
	.irq_unmask	= mv64x60_unmask_gpp,
};

/*
 * mv64x60_host_ops functions
 */

static struct irq_chip *mv64x60_chips[] = {
	[MV64x60_LEVEL1_LOW]  = &mv64x60_chip_low,
	[MV64x60_LEVEL1_HIGH] = &mv64x60_chip_high,
	[MV64x60_LEVEL1_GPP]  = &mv64x60_chip_gpp,
};
static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	int level1;

	irq_set_status_flags(virq, IRQ_LEVEL);

	level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET;
	BUG_ON(level1 > MV64x60_LEVEL1_GPP);

	irq_set_chip_and_handler(virq, mv64x60_chips[level1],
				 handle_level_irq);

	return 0;
}

static const struct irq_domain_ops mv64x60_host_ops = {
	.map = mv64x60_host_map,
};

/*
 * Global functions
 */
void __init mv64x60_init_irq(void)
{
	struct device_node *np;
	phys_addr_t paddr;
	unsigned int size;
	const unsigned int *reg;
	unsigned long flags;

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-gpp");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_gpp_reg_base = ioremap(paddr, reg[1]);
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL, "marvell,mv64360-pic");
	reg = of_get_property(np, "reg", &size);
	paddr = of_translate_address(np, reg);
	mv64x60_irq_reg_base = ioremap(paddr, reg[1]);

	mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
						 &mv64x60_host_ops, NULL);

	spin_lock_irqsave(&mv64x60_lock, flags);
	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
		 mv64x60_cached_gpp_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO,
		 mv64x60_cached_low_mask);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI,
		 mv64x60_cached_high_mask);

	out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_LO, 0);
	out_le32(mv64x60_irq_reg_base + MV64X60_IC_MAIN_CAUSE_HI, 0);
	spin_unlock_irqrestore(&mv64x60_lock, flags);
}

unsigned int mv64x60_get_irq(void)
{
	u32 cause;
	int level1;
	irq_hw_number_t hwirq;
	int virq = 0;

	cause = in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_SELECT_CAUSE);
	if (cause & MV64X60_SELECT_CAUSE_HIGH) {
		cause &= mv64x60_cached_high_mask;
		level1 = MV64x60_LEVEL1_HIGH;
		if (cause & MV64X60_HIGH_GPP_GROUPS) {
			cause = in_le32(mv64x60_gpp_reg_base +
					MV64x60_GPP_INTR_CAUSE);
			cause &= mv64x60_cached_gpp_mask;
			level1 = MV64x60_LEVEL1_GPP;
		}
	} else {
		cause &= mv64x60_cached_low_mask;
		level1 = MV64x60_LEVEL1_LOW;
	}

	if (cause) {
		hwirq = (level1 << MV64x60_LEVEL1_OFFSET) | __ilog2(cause);
		virq = irq_linear_revmap(mv64x60_irq_host, hwirq);
	}

	return virq;
}

Overall Contributors

Person                 Tokens     Prop   Commits   Commit Prop
Dale Farnsworth          1057   92.96%         1         9.09%
Lennert Buytenhek          44    3.87%         1         9.09%
Grant C. Likely            26    2.29%         3        27.27%
Thomas Gleixner             4    0.35%         2        18.18%
Michael Ellerman            3    0.26%         2        18.18%
Mark A. Greer               2    0.18%         1         9.09%
Krzysztof Kozlowski         1    0.09%         1         9.09%
Total                    1137  100.00%        11       100.00%