cregit-Linux how code gets into the kernel

Release 4.14 arch/x86/xen/irq.c

Directory: arch/x86/xen
// SPDX-License-Identifier: GPL-2.0
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */

/*
 * Poke Xen so that it re-checks for pending event-channel upcalls.
 * Any hypercall works for this: Xen inspects the pending flag on the
 * return path, so a cheap no-op query is sufficient.
 */
void xen_force_evtchn_callback(void)
{
	/* Result deliberately ignored; only the hypercall transition matters. */
	(void)HYPERVISOR_xen_version(0, NULL);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  17       100.00%   1         100.00%
Total                17       100.00%   1         100.00%


/*
 * Return the virtual IRQ-enable state in x86 EFLAGS form.
 *
 * evtchn_upcall_mask has the opposite sense of the IF flag: a non-zero
 * mask means event delivery (i.e. "interrupts") is disabled.  Report
 * X86_EFLAGS_IF when events are deliverable, 0 otherwise.
 */
asmlinkage __visible unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu = this_cpu_read(xen_vcpu);

	return vcpu->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  40       93.02%    1         25.00%
Andi Kleen           2        4.65%     2         50.00%
Alex Shi             1        2.33%     1         25.00%
Total                43       100.00%   4         100.00%

PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
/*
 * Restore the virtual IRQ state from an EFLAGS-style value previously
 * produced by xen_save_fl().  Only the IF bit of @flags is consulted.
 * If this re-enables event delivery, a pending upcall is forced so no
 * event that arrived while masked is lost.
 */
__visible void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* See xen_irq_enable() for why preemption must be disabled. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

	if (flags == 0) {
		/*
		 * Mask is now clear: force the compiler to order the
		 * unmask before the pending-event check, then kick Xen
		 * if an event snuck in while we were masked.
		 */
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  63       86.30%    1         25.00%
David Vrabel         8        10.96%    1         25.00%
Alex Shi             1        1.37%     1         25.00%
Andi Kleen           1        1.37%     1         25.00%
Total                73       100.00%   4         100.00%

PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
asmlinkage __visible void xen_irq_disable(void) { /* There's a one instruction preempt window here. We need to make sure we're don't switch CPUs between getting the vcpu pointer and updating the mask. */ preempt_disable(); this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1; preempt_enable_no_resched(); }

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  22       88.00%    1         25.00%
Andi Kleen           2        8.00%     2         50.00%
Alex Shi             1        4.00%     1         25.00%
Total                25       100.00%   4         100.00%

PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
/*
 * Unmask event-channel delivery on the current VCPU (the PV analogue
 * of "sti"), then force a callback if an event became pending while
 * delivery was masked.
 */
asmlinkage __visible void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/*
	 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
	 * cleared, so disable preemption to ensure we check for
	 * events on the VCPU we are still running on.
	 */
	preempt_disable();

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/*
	 * Doesn't matter if we get preempted here, because any
	 * pending event will get dealt with anyway.
	 */
	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();

	preempt_enable();
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  41       80.39%    1         20.00%
David Vrabel         7        13.73%    1         20.00%
Andi Kleen           2        3.92%     2         40.00%
Alex Shi             1        1.96%     1         20.00%
Total                51       100.00%   5         100.00%

PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
/*
 * Block this VCPU until an event arrives.  SCHEDOP_block implicitly
 * re-enables local event delivery (local_irq_enable()), hence "safe".
 */
static void xen_safe_halt(void)
{
	int ret;

	ret = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
	if (ret != 0)
		BUG();
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  23       100.00%   1         100.00%
Total                23       100.00%   1         100.00%


/*
 * PV "hlt": with interrupts enabled just block until the next event;
 * with interrupts disabled nothing can wake us, so take this VCPU
 * offline via VCPUOP_down instead.
 */
static void xen_halt(void)
{
	if (!irqs_disabled()) {
		xen_safe_halt();
		return;
	}

	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  27       90.00%    1         50.00%
Vitaly Kuznetsov     3        10.00%    1         50.00%
Total                30       100.00%   2         100.00%

/* Paravirt IRQ operations installed for PV guests at boot. */
static const struct pv_irq_ops xen_irq_ops __initconst = {
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.halt = xen_halt,
	.safe_halt = xen_safe_halt,
};
/*
 * Boot-time hookup of Xen IRQ handling: route interrupt-controller
 * init to xen_init_IRQ and, for guests without an HVM callback vector,
 * install the Xen paravirt IRQ ops.
 */
void __init xen_init_irq_ops(void)
{
	x86_init.irqs.intr_init = xen_init_IRQ;

	/* For PVH we use default pv_irq_ops settings. */
	if (!xen_feature(XENFEAT_hvm_callback_vector))
		pv_irq_ops = xen_irq_ops;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
Mukesh Rathor        9        31.03%    1         25.00%
Jeremy Fitzhardinge  9        31.03%    1         25.00%
Thomas Gleixner      8        27.59%    1         25.00%
Randy Dunlap         3        10.34%    1         25.00%
Total                29       100.00%   4         100.00%


Overall Contributors

Person               Tokens   Prop      Commits   CommitProp
Jeremy Fitzhardinge  333      84.52%    2         14.29%
David Vrabel         15       3.81%     1         7.14%
Mukesh Rathor        12       3.05%     1         7.14%
Thomas Gleixner      11       2.79%     1         7.14%
Andi Kleen           7        1.78%     2         14.29%
Alex Shi             4        1.02%     1         7.14%
Vitaly Kuznetsov     3        0.76%     1         7.14%
Stefano Stabellini   3        0.76%     1         7.14%
Randy Dunlap         3        0.76%     1         7.14%
Greg Kroah-Hartman   1        0.25%     1         7.14%
Juergen Gross        1        0.25%     1         7.14%
Daniel Kiper         1        0.25%     1         7.14%
Total                394      100.00%   14        100.00%
Directory: arch/x86/xen
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.