cregit-Linux: how code gets into the kernel

Release 4.14: arch/mn10300/kernel/irq.c

/* MN10300 Arch-specific interrupt handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>


unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};

EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP

static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};


#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)

static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif  /* CONFIG_SMP */


atomic_t irq_err_count;

/*
 * MN10300 interrupt controller operations
 */

static void mn10300_cpupic_ack(struct irq_data *d)
{
        unsigned int irq = d->irq;
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();
        GxICR_u8(irq) = GxICR_DETECT;
        tmp = GxICR(irq);
        arch_local_irq_restore(flags);
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
David Howells       24   48.00%        1      33.33%
Akira Takeuchi      15   30.00%        1      33.33%
Thomas Gleixner     11   22.00%        1      33.33%
Total               50  100.00%        3     100.00%


static void __mask_and_set_icr(unsigned int irq,
                               unsigned int mask, unsigned int set)
{
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();
        tmp = GxICR(irq);
        GxICR(irq) = (tmp & mask) | set;
        tmp = GxICR(irq);
        arch_local_irq_restore(flags);
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi      31   50.00%        1      50.00%
David Howells       31   50.00%        1      50.00%
Total               62  100.00%        2     100.00%


static void mn10300_cpupic_mask(struct irq_data *d)
{
        __mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi      15   68.18%        1      33.33%
Thomas Gleixner      6   27.27%        1      33.33%
David Howells        1    4.55%        1      33.33%
Total               22  100.00%        3     100.00%


static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
        unsigned int irq = d->irq;
#ifdef CONFIG_SMP
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        if (!test_and_clear_bit(irq, irq_affinity_request)) {
                tmp = GxICR(irq);
                GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
                tmp = GxICR(irq);
        } else {
                u16 tmp2;
                tmp = GxICR(irq);
                GxICR(irq) = (tmp & GxICR_LEVEL);
                tmp2 = GxICR(irq);

                irq_affinity_online[irq] =
                        cpumask_any_and(irq_data_get_affinity_mask(d),
                                        cpu_online_mask);
                CROSS_GxICR(irq, irq_affinity_online[irq]) =
                        (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
                tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
        }

        arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
        __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
David Howells       81   46.82%        2      28.57%
Akira Takeuchi      62   35.84%        1      14.29%
Mark Salter         12    6.94%        1      14.29%
Thomas Gleixner     12    6.94%        1      14.29%
Jiang Liu            3    1.73%        1      14.29%
Motohiro Kosaki      3    1.73%        1      14.29%
Total              173  100.00%        7     100.00%


static void mn10300_cpupic_unmask(struct irq_data *d)
{
        __mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi      15   68.18%        1      33.33%
Thomas Gleixner      6   27.27%        1      33.33%
David Howells        1    4.55%        1      33.33%
Total               22  100.00%        3     100.00%


static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
        unsigned int irq = d->irq;
        /* the MN10300 PIC latches its interrupt request bit, even after the
         * device has ceased to assert its interrupt line and the interrupt
         * channel has been disabled in the PIC, so for level-triggered
         * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
        unsigned long flags;
        u16 tmp;

        flags = arch_local_cli_save();

        if (!test_and_clear_bit(irq, irq_affinity_request)) {
                tmp = GxICR(irq);
                GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
                tmp = GxICR(irq);
        } else {
                tmp = GxICR(irq);

                irq_affinity_online[irq] =
                        cpumask_any_and(irq_data_get_affinity_mask(d),
                                        cpu_online_mask);
                CROSS_GxICR(irq, irq_affinity_online[irq]) =
                        (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
                tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
        }

        arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
        __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi     125   80.65%        1      20.00%
Thomas Gleixner     12    7.74%        1      20.00%
Mark Salter         12    7.74%        1      20.00%
Motohiro Kosaki      3    1.94%        1      20.00%
Jiang Liu            3    1.94%        1      20.00%
Total              155  100.00%        5     100.00%

#ifdef CONFIG_SMP
static int mn10300_cpupic_setaffinity(struct irq_data *d,
                                      const struct cpumask *mask, bool force)
{
        unsigned long flags;

        flags = arch_local_cli_save();
        set_bit(d->irq, irq_affinity_request);
        arch_local_irq_restore(flags);
        return 0;
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi      36   78.26%        1      33.33%
Thomas Gleixner      9   19.57%        1      33.33%
Mark Salter          1    2.17%        1      33.33%
Total               46  100.00%        3     100.00%

#endif /* CONFIG_SMP */

/*
 * MN10300 PIC level-triggered IRQ handling.
 *
 * The PIC has no 'ACK' function per se.  It is possible to clear individual
 * channel latches, but each latch relatches whether or not the channel is
 * masked, so we need to clear the latch when we unmask the channel.
 *
 * Also for this reason, we don't supply an ack() op (it's unused anyway if
 * mask_ack() is provided), and mask_ack() just masks.
 */
static struct irq_chip mn10300_cpu_pic_level = {
        .name             = "cpu_l",
        .irq_disable      = mn10300_cpupic_mask,
        .irq_enable       = mn10300_cpupic_unmask_clear,
        .irq_ack          = NULL,
        .irq_mask         = mn10300_cpupic_mask,
        .irq_mask_ack     = mn10300_cpupic_mask,
        .irq_unmask       = mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
        .irq_set_affinity = mn10300_cpupic_setaffinity,
#endif
};

/*
 * MN10300 PIC edge-triggered IRQ handling.
 *
 * We use the latch clearing function of the PIC as the 'ACK' function.
 */
static struct irq_chip mn10300_cpu_pic_edge = {
        .name             = "cpu_e",
        .irq_disable      = mn10300_cpupic_mask,
        .irq_enable       = mn10300_cpupic_unmask,
        .irq_ack          = mn10300_cpupic_ack,
        .irq_mask         = mn10300_cpupic_mask,
        .irq_mask_ack     = mn10300_cpupic_mask_ack,
        .irq_unmask       = mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
        .irq_set_affinity = mn10300_cpupic_setaffinity,
#endif
};
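
For orientation, drivers never invoke these chip operations directly: they register a handler through the generic IRQ layer, and the flow handler installed alongside the chip (handle_level_irq() here) calls the mask/mask_ack/unmask ops above around that handler. A minimal sketch, assuming a hypothetical device; the IRQ number, name, and handler are illustrative and not from this file:

#include <linux/init.h>
#include <linux/interrupt.h>

/* hypothetical handler for an example device */
static irqreturn_t example_isr(int irq, void *dev_id)
{
        /* quiesce the device, then report the interrupt as handled */
        return IRQ_HANDLED;
}

static int __init example_init(void)
{
        /* IRQ 42 is arbitrary; init_IRQ() below attaches
         * mn10300_cpu_pic_edge and handle_level_irq() to such lines */
        return request_irq(42, example_isr, 0, "example-dev", NULL);
}
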
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(int irq)
{
        printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi       9   56.25%        1      50.00%
David Howells        7   43.75%        1      50.00%
Total               16  100.00%        2     100.00%

/*
 * change the level at which an IRQ executes
 * - must not be called whilst interrupts are being processed!
 */
void set_intr_level(int irq, u16 level)
{
        BUG_ON(in_interrupt());

        __mask_and_set_icr(irq, GxICR_ENABLE, level);
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi      26  100.00%        1     100.00%
Total               26  100.00%        1     100.00%
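
set_intr_level() above is meant for board setup code that wants a given interrupt source to preempt others; per __mask_and_set_icr(), it preserves the channel's enable bit and installs the new level bits. A minimal usage sketch; the IRQ number is arbitrary, and GxICR_LEVEL_1 is assumed to be one of the level constants from asm/intctl-regs.h:

        /* hypothetical board setup: run IRQ 10 at interrupt level 1;
         * only legal outside interrupt context (see the BUG_ON above) */
        set_intr_level(10, GxICR_LEVEL_1);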

/*
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
 * than before
 */
void mn10300_set_lateack_irq_type(int irq)
{
        irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
                                 handle_level_irq);
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
David Howells       16   88.89%        2      50.00%
Thomas Gleixner      1    5.56%        1      25.00%
Akira Takeuchi       1    5.56%        1      25.00%
Total               18  100.00%        4     100.00%
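
mn10300_set_lateack_irq_type() above moves an IRQ from the default edge chip (installed in init_IRQ() below) onto the level chip, so the request latch is only cleared at unmask time, after the handlers have run. A minimal sketch; the IRQ number is purely illustrative:

        /* hypothetical: a level-sensitive device whose latch must not be
         * cleared until its handler has silenced the interrupt line */
        mn10300_set_lateack_irq_type(3);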

/*
 * initialise the interrupt system
 */
void __init init_IRQ(void)
{
        int irq;

        for (irq = 0; irq < NR_IRQS; irq++)
                if (irq_get_chip(irq) == &no_irq_chip)
                        /* due to the PIC latching interrupt requests, even
                         * when the IRQ is disabled, IRQ_PENDING is superfluous
                         * and we can use handle_level_irq() for edge-triggered
                         * interrupts */
                        irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
                                                 handle_level_irq);

        unit_init_IRQ();
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
David Howells       43   89.58%        2      40.00%
Thomas Gleixner      5   10.42%        3      60.00%
Total               48  100.00%        5     100.00%

/*
 * handle normal device IRQs
 */
asmlinkage void do_IRQ(void)
{
        unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
        unsigned int cpu_id = smp_processor_id();
        int irq;

        sp = current_stack_pointer();
        BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

        /* make sure local_irq_enable() doesn't muck up the interrupt priority
         * setting in EPSW */
        old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
        local_save_flags(epsw);
        __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
        irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
        __IRQ_STAT(cpu_id, __irq_count)++;
#endif

        irq_enter();

        for (;;) {
                /* ask the interrupt controller for the next IRQ to process
                 * - the result we get depends on EPSW.IM
                 */
                irq = IAGR & IAGR_GN;
                if (!irq)
                        break;

                local_irq_restore(irq_disabled_epsw);

                generic_handle_irq(irq >> 2);

                /* restore IRQ controls for IAGR access */
                local_irq_restore(epsw);
        }

        __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

        irq_exit();
}

Contributors

PersonTokensPropCommitsCommitProp
David Howells12282.99%133.33%
Akira Takeuchi2214.97%133.33%
Stoyan Gaydarov32.04%133.33%
Total147100.00%3100.00%
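
Note the decode inside the loop above: the hardware reports the accepted interrupt in IAGR pre-multiplied by four (a group offset rather than an index), hence the right shift by two before the call to generic_handle_irq(). A worked example with an invented register value:

        /* illustrative only: if IAGR & IAGR_GN reads back 0x28, the
         * pending Linux IRQ number is 0x28 >> 2 == 10 */
        unsigned int iagr = 0x28;       /* hypothetical readback */
        unsigned int irq = iagr >> 2;   /* == 10, passed to generic_handle_irq() */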

/*
 * Display interrupt management information through /proc/interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for (j = 0; j < NR_CPUS; j++)
                if (cpu_online(j))
                        seq_printf(p, "%10u ", nmi_count(j));
        seq_putc(p, '\n');
#endif

        seq_printf(p, "%*s: ", prec, "ERR");
        seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
        return 0;
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
David Howells       79   80.61%        1      50.00%
Thomas Gleixner     19   19.39%        1      50.00%
Total               98  100.00%        2     100.00%
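
On kernels with CONFIG_MN10300_WD_TIMER enabled, this contributes a per-CPU NMI row ahead of the ERR row in /proc/interrupts. Plausible output for a single-CPU system (the counts are invented):

        NMI:         12
        ERR:          0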

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
        int irq;
        unsigned int self, new;
        unsigned long flags;

        self = smp_processor_id();
        for (irq = 0; irq < NR_IRQS; irq++) {
                struct irq_data *data = irq_get_irq_data(irq);
                struct cpumask *mask = irq_data_get_affinity_mask(data);

                if (irqd_is_per_cpu(data))
                        continue;

                if (cpumask_test_cpu(self, mask) &&
                    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
                        int cpu_id;
                        cpu_id = cpumask_first(cpu_online_mask);
                        cpumask_set_cpu(cpu_id, mask);
                }
                /* We need to operate irq_affinity_online atomically. */
                arch_local_cli_save(flags);
                if (irq_affinity_online[irq] == self) {
                        u16 x, tmp;

                        x = GxICR(irq);
                        GxICR(irq) = x & GxICR_LEVEL;
                        tmp = GxICR(irq);

                        new = cpumask_any_and(mask, cpu_online_mask);
                        irq_affinity_online[irq] = new;

                        CROSS_GxICR(irq, new) =
                                (x & GxICR_LEVEL) | GxICR_DETECT;
                        tmp = CROSS_GxICR(irq, new);

                        x &= GxICR_LEVEL | GxICR_ENABLE;
                        if (GxICR(irq) & GxICR_REQUEST)
                                x |= GxICR_REQUEST | GxICR_DETECT;
                        CROSS_GxICR(irq, new) = x;
                        tmp = CROSS_GxICR(irq, new);
                }
                arch_local_irq_restore(flags);
        }
}

Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi     197   83.12%        1      20.00%
Jiang Liu           13    5.49%        1      20.00%
Thomas Gleixner     13    5.49%        1      20.00%
Motohiro Kosaki     10    4.22%        1      20.00%
Mark Salter          4    1.69%        1      20.00%
Total              237  100.00%        5     100.00%

#endif /* CONFIG_HOTPLUG_CPU */

Overall Contributors

Person          Tokens     Prop  Commits  CommitProp
Akira Takeuchi     665   49.33%        1       7.14%
David Howells      507   37.61%        2      14.29%
Thomas Gleixner    108    8.01%        5      35.71%
Mark Salter         29    2.15%        2      14.29%
Jiang Liu           19    1.41%        1       7.14%
Motohiro Kosaki     16    1.19%        1       7.14%
Stoyan Gaydarov      3    0.22%        1       7.14%
Paul Bolle           1    0.07%        1       7.14%
Total             1348  100.00%       14     100.00%
Created with cregit.