cregit-Linux how code gets into the kernel

Release 4.14 arch/x86/kvm/i8259.c

Directory: arch/x86/kvm
/*
 * 8259 interrupt controller emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2007 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Port from Qemu.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include "irq.h"

#include <linux/kvm_host.h>
#include "trace.h"


#define pr_pic_unimpl(fmt, ...)	\
	pr_err_ratelimited("kvm: pic: " fmt, ## __VA_ARGS__)

static void pic_irq_request(struct kvm *kvm, int level);


/*
 * Acquire the PIC state spinlock.  The __acquires() annotation lets
 * sparse verify that lock/unlock calls are balanced.
 */
static void pic_lock(struct kvm_pic *s) __acquires(&s->lock)
{
	spin_lock(&s->lock);
}
/*
 * Release the PIC state spinlock.  If a wakeup was latched while the
 * lock was held (see pic_irq_request()), kick the first vCPU that can
 * accept PIC interrupts so it re-evaluates pending events.  The kick is
 * issued only after the lock is dropped.
 */
static void pic_unlock(struct kvm_pic *s) __releases(&s->lock)
{
	bool wakeup = s->wakeup_needed;
	struct kvm_vcpu *vcpu;
	int i;

	s->wakeup_needed = false;

	spin_unlock(&s->lock);

	if (wakeup) {
		kvm_for_each_vcpu(i, vcpu, s->kvm) {
			if (kvm_apic_accept_pic_intr(vcpu)) {
				kvm_make_request(KVM_REQ_EVENT, vcpu);
				kvm_vcpu_kick(vcpu);
				/* one vCPU is enough to pick up the event */
				return;
			}
		}
	}
}
static void pic_clear_isr(struct kvm_kpic_state *s, int irq) { s->isr &= ~(1 << irq); if (s != &s->pics_state->pics[0]) irq += 8; /* * We are dropping lock while calling ack notifiers since ack * notifier callbacks for assigned devices call into PIC recursively. * Other interrupt may be delivered to PIC while lock is dropped but * it should be safe since PIC state is already updated at this stage. */ pic_unlock(s->pics_state); kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq); pic_lock(s->pics_state); }

Contributors

PersonTokensPropCommitsCommitProp
Gleb Natapov4763.51%240.00%
Avi Kivity2432.43%120.00%
Jan Kiszka22.70%120.00%
Marcelo Tosatti11.35%120.00%
Total74100.00%5100.00%

/*
 * set irq level. If an edge is detected, then the IRR is set to 1.
 *
 * Returns 1 if the IRR bit was newly set, 0 if it was already pending,
 * and -1 if the line is masked in the IMR.
 */
static inline int pic_set_irq1(struct kvm_kpic_state *s, int irq, int level)
{
	int mask, ret = 1;

	mask = 1 << irq;
	if (s->elcr & mask)	/* level triggered */
		if (level) {
			ret = !(s->irr & mask);
			s->irr |= mask;
			s->last_irr |= mask;
		} else {
			s->irr &= ~mask;
			s->last_irr &= ~mask;
		}
	else	/* edge triggered */
		if (level) {
			/* only a 0 -> 1 transition latches a request */
			if ((s->last_irr & mask) == 0) {
				ret = !(s->irr & mask);
				s->irr |= mask;
			}
			s->last_irr |= mask;
		} else
			s->last_irr &= ~mask;

	return (s->imr & mask) ? -1 : ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong11071.90%150.00%
Gleb Natapov4328.10%150.00%
Total153100.00%2100.00%

/*
 * Return the highest priority found in @mask (highest = smallest number
 * after applying the rotation offset priority_add).  Return 8 if no irq
 * is pending in @mask.
 */
static inline int get_priority(struct kvm_kpic_state *s, int mask)
{
	int prio;

	if (!mask)
		return 8;

	for (prio = 0;
	     (mask & (1 << ((prio + s->priority_add) & 7))) == 0;
	     prio++)
		;

	return prio;
}

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong61100.00%1100.00%
Total61100.00%1100.00%

/*
 * return the pic wanted interrupt. return -1 if none
 */
static int pic_get_irq(struct kvm_kpic_state *s)
{
	int mask, cur_priority, priority;

	mask = s->irr & ~s->imr;
	priority = get_priority(s, mask);
	if (priority == 8)
		return -1;
	/*
	 * compute current priority. If special fully nested mode on the
	 * master, the IRQ coming from the slave is not taken into account
	 * for the priority computation.
	 */
	mask = s->isr;
	if (s->special_fully_nested_mode && s == &s->pics_state->pics[0])
		mask &= ~(1 << 2);	/* ignore the cascade pin */
	cur_priority = get_priority(s, mask);
	if (priority < cur_priority)
		/*
		 * higher priority found: an irq should be generated
		 */
		return (priority + s->priority_add) & 7;
	else
		return -1;
}

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong114100.00%1100.00%
Total114100.00%1100.00%

/*
 * Raise irq to CPU if necessary; must be called every time the active
 * irq may change.  A pending slave interrupt is cascaded by pulsing
 * master pin 2 (raise then lower), which latches an edge on the master.
 */
static void pic_update_irq(struct kvm_pic *s)
{
	int slave_irq, master_irq;

	slave_irq = pic_get_irq(&s->pics[1]);
	if (slave_irq >= 0) {
		/* irq requested by slave pic: signal the master PIC */
		pic_set_irq1(&s->pics[0], 2, 1);
		pic_set_irq1(&s->pics[0], 2, 0);
	}

	master_irq = pic_get_irq(&s->pics[0]);
	pic_irq_request(s->kvm, master_irq >= 0);
}

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong8895.65%133.33%
Avi Kivity44.35%266.67%
Total92100.00%3100.00%


void kvm_pic_update_irq(struct kvm_pic *s) { pic_lock(s); pic_update_irq(s); pic_unlock(s); }

Contributors

PersonTokensPropCommitsCommitProp
Qing He1560.00%133.33%
Avi Kivity832.00%133.33%
Jan Kiszka28.00%133.33%
Total25100.00%3100.00%


int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level) { int ret, irq_level; BUG_ON(irq < 0 || irq >= PIC_NUM_PINS); pic_lock(s); irq_level = __kvm_irq_line_state(&s->irq_states[irq], irq_source_id, level); ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level); pic_update_irq(s); trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr, s->pics[irq >> 3].imr, ret == 0); pic_unlock(s); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Gleb Natapov4635.94%225.00%
Michael S. Tsirkin3728.91%225.00%
Eddie Dong3426.56%112.50%
Avi Kivity53.91%112.50%
Ben-Ami Yassour53.91%112.50%
Jan Kiszka10.78%112.50%
Total128100.00%8100.00%


void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id) { int i; pic_lock(s); for (i = 0; i < PIC_NUM_PINS; i++) __clear_bit(irq_source_id, &s->irq_states[i]); pic_unlock(s); }

Contributors

PersonTokensPropCommitsCommitProp
Michael S. Tsirkin52100.00%1100.00%
Total52100.00%1100.00%

/*
 * acknowledge interrupt 'irq'
 */
static inline void pic_intack(struct kvm_kpic_state *s, int irq)
{
	s->isr |= 1 << irq;
	/*
	 * We don't clear a level sensitive interrupt here
	 */
	if (!(s->elcr & (1 << irq)))
		s->irr &= ~(1 << irq);

	/* in auto-EOI mode the in-service bit is dropped immediately */
	if (s->auto_eoi) {
		if (s->rotate_on_auto_eoi)
			s->priority_add = (irq + 1) & 7;
		pic_clear_isr(s, irq);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong4351.81%133.33%
Gleb Natapov2732.53%133.33%
Avi Kivity1315.66%133.33%
Total83100.00%3100.00%


int kvm_pic_read_irq(struct kvm *kvm) { int irq, irq2, intno; struct kvm_pic *s = kvm->arch.vpic; s->output = 0; pic_lock(s); irq = pic_get_irq(&s->pics[0]); if (irq >= 0) { pic_intack(&s->pics[0], irq); if (irq == 2) { irq2 = pic_get_irq(&s->pics[1]); if (irq2 >= 0) pic_intack(&s->pics[1], irq2); else /* * spurious IRQ on slave controller */ irq2 = 7; intno = s->pics[1].irq_base + irq2; irq = irq2 + 8; } else intno = s->pics[0].irq_base + irq; } else { /* * spurious IRQ on host controller */ irq = 7; intno = s->pics[0].irq_base + irq; } pic_update_irq(s); pic_unlock(s); return intno; }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong15784.41%116.67%
Marcelo Tosatti94.84%116.67%
Avi Kivity84.30%116.67%
Gleb Natapov63.23%116.67%
David Hildenbrand42.15%116.67%
Jan Kiszka21.08%116.67%
Total186100.00%6100.00%


static void kvm_pic_reset(struct kvm_kpic_state *s) { int irq, i; struct kvm_vcpu *vcpu; u8 edge_irr = s->irr & ~s->elcr; bool found = false; s->last_irr = 0; s->irr &= s->elcr; s->imr = 0; s->priority_add = 0; s->special_mask = 0; s->read_reg_select = 0; if (!s->init4) { s->special_fully_nested_mode = 0; s->auto_eoi = 0; } s->init_state = 1; kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm) if (kvm_apic_accept_pic_intr(vcpu)) { found = true; break; } if (!found) return; for (irq = 0; irq < PIC_NUM_PINS/2; irq++) if (edge_irr & (1 << irq)) pic_clear_isr(s, irq); }

Contributors

PersonTokensPropCommitsCommitProp
Gleb Natapov9055.21%333.33%
Eddie Dong6036.81%222.22%
Marcelo Tosatti116.75%222.22%
David Hildenbrand10.61%111.11%
Avi Kivity10.61%111.11%
Total163100.00%9100.00%


static void pic_ioport_write(void *opaque, u32 addr, u32 val) { struct kvm_kpic_state *s = opaque; int priority, cmd, irq; addr &= 1; if (addr == 0) { if (val & 0x10) { s->init4 = val & 1; if (val & 0x02) pr_pic_unimpl("single mode not supported"); if (val & 0x08) pr_pic_unimpl( "level sensitive irq not supported"); kvm_pic_reset(s); } else if (val & 0x08) { if (val & 0x04) s->poll = 1; if (val & 0x02) s->read_reg_select = val & 1; if (val & 0x40) s->special_mask = (val >> 5) & 1; } else { cmd = val >> 5; switch (cmd) { case 0: case 4: s->rotate_on_auto_eoi = cmd >> 2; break; case 1: /* end of interrupt */ case 5: priority = get_priority(s, s->isr); if (priority != 8) { irq = (priority + s->priority_add) & 7; if (cmd == 5) s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); } break; case 3: irq = val & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); break; case 6: s->priority_add = (val + 1) & 7; pic_update_irq(s->pics_state); break; case 7: irq = val & 7; s->priority_add = (irq + 1) & 7; pic_clear_isr(s, irq); pic_update_irq(s->pics_state); break; default: break; /* no operation */ } } } else switch (s->init_state) { case 0: { /* normal mode */ u8 imr_diff = s->imr ^ val, off = (s == &s->pics_state->pics[0]) ? 0 : 8; s->imr = val; for (irq = 0; irq < PIC_NUM_PINS/2; irq++) if (imr_diff & (1 << irq)) kvm_fire_mask_notifiers( s->pics_state->kvm, SELECT_PIC(irq + off), irq + off, !!(s->imr & (1 << irq))); pic_update_irq(s->pics_state); break; } case 1: s->irq_base = val & 0xf8; s->init_state = 2; break; case 2: if (s->init4) s->init_state = 3; else s->init_state = 0; break; case 3: s->special_fully_nested_mode = (val >> 4) & 1; s->auto_eoi = (val >> 1) & 1; s->init_state = 0; break; } }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong39676.60%112.50%
Gleb Natapov10219.73%450.00%
Avi Kivity173.29%225.00%
Jan Kiszka20.39%112.50%
Total517100.00%8100.00%


static u32 pic_poll_read(struct kvm_kpic_state *s, u32 addr1) { int ret; ret = pic_get_irq(s); if (ret >= 0) { if (addr1 >> 7) { s->pics_state->pics[0].isr &= ~(1 << 2); s->pics_state->pics[0].irr &= ~(1 << 2); } s->irr &= ~(1 << ret); pic_clear_isr(s, ret); if (addr1 >> 7 || ret != 2) pic_update_irq(s->pics_state); } else { ret = 0x07; pic_update_irq(s->pics_state); } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong12496.88%150.00%
Avi Kivity43.12%150.00%
Total128100.00%2100.00%


static u32 pic_ioport_read(void *opaque, u32 addr) { struct kvm_kpic_state *s = opaque; int ret; if (s->poll) { ret = pic_poll_read(s, addr); s->poll = 0; } else if ((addr & 1) == 0) if (s->read_reg_select) ret = s->isr; else ret = s->irr; else ret = s->imr; return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong8093.02%150.00%
David Hildenbrand66.98%150.00%
Total86100.00%2100.00%


static void elcr_ioport_write(void *opaque, u32 addr, u32 val) { struct kvm_kpic_state *s = opaque; s->elcr = val & s->elcr_mask; }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong33100.00%1100.00%
Total33100.00%1100.00%


static u32 elcr_ioport_read(void *opaque, u32 addr1) { struct kvm_kpic_state *s = opaque; return s->elcr; }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong25100.00%1100.00%
Total25100.00%1100.00%


static int picdev_write(struct kvm_pic *s, gpa_t addr, int len, const void *val) { unsigned char data = *(unsigned char *)val; if (len != 1) { pr_pic_unimpl("non byte write\n"); return 0; } switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: pic_lock(s); pic_ioport_write(&s->pics[addr >> 7], addr, data); pic_unlock(s); break; case 0x4d0: case 0x4d1: pic_lock(s); elcr_ioport_write(&s->pics[addr & 1], addr, data); pic_unlock(s); break; default: return -EOPNOTSUPP; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong10072.46%114.29%
David Hildenbrand2215.94%114.29%
Michael S. Tsirkin75.07%114.29%
Avi Kivity42.90%114.29%
Jan Kiszka32.17%228.57%
Sasha Levin21.45%114.29%
Total138100.00%7100.00%


static int picdev_read(struct kvm_pic *s, gpa_t addr, int len, void *val) { unsigned char *data = (unsigned char *)val; if (len != 1) { memset(val, 0, len); pr_pic_unimpl("non byte read\n"); return 0; } switch (addr) { case 0x20: case 0x21: case 0xa0: case 0xa1: pic_lock(s); *data = pic_ioport_read(&s->pics[addr >> 7], addr); pic_unlock(s); break; case 0x4d0: case 0x4d1: pic_lock(s); *data = elcr_ioport_read(&s->pics[addr & 1], addr); pic_unlock(s); break; default: return -EOPNOTSUPP; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong9463.51%114.29%
David Hildenbrand3422.97%228.57%
Petr Matousek96.08%114.29%
Michael S. Tsirkin74.73%114.29%
Sasha Levin21.35%114.29%
Jan Kiszka21.35%114.29%
Total148100.00%7100.00%


static int picdev_master_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { return picdev_write(container_of(dev, struct kvm_pic, dev_master), addr, len, val); }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin4289.36%150.00%
Nikolay Nikolaev510.64%150.00%
Total47100.00%2100.00%


static int picdev_master_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { return picdev_read(container_of(dev, struct kvm_pic, dev_master), addr, len, val); }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin4189.13%150.00%
Nikolay Nikolaev510.87%150.00%
Total46100.00%2100.00%


static int picdev_slave_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { return picdev_write(container_of(dev, struct kvm_pic, dev_slave), addr, len, val); }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin4289.36%150.00%
Nikolay Nikolaev510.64%150.00%
Total47100.00%2100.00%


static int picdev_slave_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { return picdev_read(container_of(dev, struct kvm_pic, dev_slave), addr, len, val); }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin4189.13%150.00%
Nikolay Nikolaev510.87%150.00%
Total46100.00%2100.00%


static int picdev_eclr_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, const void *val) { return picdev_write(container_of(dev, struct kvm_pic, dev_eclr), addr, len, val); }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin4289.36%150.00%
Nikolay Nikolaev510.64%150.00%
Total47100.00%2100.00%


static int picdev_eclr_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, gpa_t addr, int len, void *val) { return picdev_read(container_of(dev, struct kvm_pic, dev_eclr), addr, len, val); }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin4189.13%150.00%
Nikolay Nikolaev510.87%150.00%
Total46100.00%2100.00%

/*
 * callback when PIC0 irq status changed
 */
static void pic_irq_request(struct kvm *kvm, int level)
{
	struct kvm_pic *s = kvm->arch.vpic;

	/* if output was previously deasserted, a vCPU kick will be needed */
	if (!s->output)
		s->wakeup_needed = true;
	s->output = level;
}

Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong1738.64%228.57%
Marcelo Tosatti1022.73%114.29%
Gleb Natapov715.91%114.29%
David Hildenbrand49.09%114.29%
Jan Kiszka36.82%114.29%
Avi Kivity36.82%114.29%
Total44100.00%7100.00%

static const struct kvm_io_device_ops picdev_master_ops = { .read = picdev_master_read, .write = picdev_master_write, }; static const struct kvm_io_device_ops picdev_slave_ops = { .read = picdev_slave_read, .write = picdev_slave_write, }; static const struct kvm_io_device_ops picdev_eclr_ops = { .read = picdev_eclr_read, .write = picdev_eclr_write, };
int kvm_pic_init(struct kvm *kvm) { struct kvm_pic *s; int ret; s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); if (!s) return -ENOMEM; spin_lock_init(&s->lock); s->kvm = kvm; s->pics[0].elcr_mask = 0xf8; s->pics[1].elcr_mask = 0xde; s->pics[0].pics_state = s; s->pics[1].pics_state = s; /* * Initialize PIO device */ kvm_iodevice_init(&s->dev_master, &picdev_master_ops); kvm_iodevice_init(&s->dev_slave, &picdev_slave_ops); kvm_iodevice_init(&s->dev_eclr, &picdev_eclr_ops); mutex_lock(&kvm->slots_lock); ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x20, 2, &s->dev_master); if (ret < 0) goto fail_unlock; ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0xa0, 2, &s->dev_slave); if (ret < 0) goto fail_unreg_2; ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, 0x4d0, 2, &s->dev_eclr); if (ret < 0) goto fail_unreg_1; mutex_unlock(&kvm->slots_lock); kvm->arch.vpic = s; return 0; fail_unreg_1: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_slave); fail_unreg_2: kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &s->dev_master); fail_unlock: mutex_unlock(&kvm->slots_lock); kfree(s); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin12844.76%110.00%
Eddie Dong8830.77%110.00%
Gregory Haskins258.74%220.00%
Marcelo Tosatti155.24%220.00%
Radim Krčmář144.90%110.00%
Avi Kivity144.90%220.00%
Michael S. Tsirkin20.70%110.00%
Total286100.00%10100.00%


void kvm_pic_destroy(struct kvm *kvm) { struct kvm_pic *vpic = kvm->arch.vpic; if (!vpic) return; mutex_lock(&kvm->slots_lock); kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); mutex_unlock(&kvm->slots_lock); kvm->arch.vpic = NULL; kfree(vpic); }

Contributors

PersonTokensPropCommitsCommitProp
Sasha Levin2525.51%116.67%
Wei Yongjun2222.45%116.67%
Radim Krčmář2222.45%116.67%
David Hildenbrand1616.33%116.67%
Paolo Bonzini77.14%116.67%
Peter Xu66.12%116.67%
Total98100.00%6100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Eddie Dong163652.44%35.45%
Sasha Levin44514.26%11.82%
Gleb Natapov37111.89%1120.00%
Avi Kivity1304.17%814.55%
Michael S. Tsirkin1053.37%47.27%
Jan Kiszka993.17%23.64%
David Hildenbrand922.95%712.73%
Marcelo Tosatti461.47%59.09%
Gregory Haskins401.28%23.64%
Radim Krčmář361.15%11.82%
Nikolay Nikolaev300.96%11.82%
Wei Yongjun220.71%11.82%
Chris Lalancette200.64%11.82%
Qing He150.48%11.82%
Petr Matousek90.29%11.82%
Paolo Bonzini70.22%11.82%
Peter Xu60.19%11.82%
Ben-Ami Yassour50.16%11.82%
Tejun Heo30.10%11.82%
Hollis Blanchard20.06%11.82%
Nicolas Kaiser10.03%11.82%
Total3120100.00%55100.00%
Directory: arch/x86/kvm
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.