Release 4.7 drivers/xen/events/events_fifo.c
  
  
/*
 * Xen event channels (FIFO-based ABI)
 *
 * Copyright (C) 2013 Citrix Systems R&D ltd.
 *
 * This source code is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * Or, when distributed separately from the Linux kernel or
 * incorporated into other software packages, subject to the following
 * license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <asm/barrier.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/page.h>
#include "events_internal.h"
/*
 * Number of 32-bit event words in one Xen page, and the number of
 * event array pages needed to cover every channel the FIFO ABI
 * allows (EVTCHN_FIFO_NR_CHANNELS).
 */
#define EVENT_WORDS_PER_PAGE (XEN_PAGE_SIZE / sizeof(event_word_t))
#define MAX_EVENT_ARRAY_PAGES (EVTCHN_FIFO_NR_CHANNELS / EVENT_WORDS_PER_PAGE)
/*
 * Per-cpu FIFO state: a local copy of the HEAD pointer for each of
 * the EVTCHN_FIFO_MAX_QUEUES priority queues.
 */
struct evtchn_fifo_queue {
	/* Next port to consume per priority; 0 means "re-read HEAD from
	   the shared control block" (see consume_one_event()). */
uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
};
/* Control block registered with Xen via EVTCHNOP_init_control, one per CPU. */
static DEFINE_PER_CPU(struct evtchn_fifo_control_block *, cpu_control_block);
/* Local queue HEAD copies (see struct evtchn_fifo_queue). */
static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
/* Pages of event words shared with Xen; extended on demand by
 * evtchn_fifo_setup(), up to MAX_EVENT_ARRAY_PAGES. */
static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
/* Number of event_array pages currently registered with Xen. */
static unsigned event_array_pages __read_mostly;
/*
 * sync_set_bit() and friends must be unsigned long aligned.
 *
 * Event words are 32 bits wide.  On 64-bit builds, BM() rounds a word
 * pointer down to the containing 8-byte-aligned long, and
 * EVTCHN_FIFO_BIT() compensates by adding 32 to the bit number when
 * the word sits in the upper half of that long.
 */
#if BITS_PER_LONG > 32
#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
#define EVTCHN_FIFO_BIT(b, w) \
    (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)
#else
/* 32-bit: event words are already naturally long-aligned. */
#define BM(w) ((unsigned long *)(w))
#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b
#endif
/* Locate the shared event word for @port within the event array. */
static inline event_word_t *event_word_from_port(unsigned port)
{
	return &event_array[port / EVENT_WORDS_PER_PAGE]
			   [port % EVENT_WORDS_PER_PAGE];
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 28 | 100.00% | 1 | 100.00% | 
 | Total | 28 | 100.00% | 1 | 100.00% | 
/* Upper bound on event channels supported by the FIFO ABI. */
static unsigned evtchn_fifo_max_channels(void)
{
	return EVTCHN_FIFO_NR_CHANNELS;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 11 | 100.00% | 1 | 100.00% | 
 | Total | 11 | 100.00% | 1 | 100.00% | 
/* Number of channels currently addressable: one event word per
 * channel, across the event array pages populated so far. */
static unsigned evtchn_fifo_nr_channels(void)
{
	return event_array_pages * EVENT_WORDS_PER_PAGE;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 13 | 100.00% | 1 | 100.00% | 
 | Total | 13 | 100.00% | 1 | 100.00% | 
static int init_control_block(int cpu,
                              struct evtchn_fifo_control_block *control_block)
{
	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
	struct evtchn_init_control init_control;
	unsigned int i;
	/* Reset the control block and the local HEADs. */
	clear_page(control_block);
	for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
		q->head[i] = 0;
	init_control.control_gfn = virt_to_gfn(control_block);
	init_control.offset      = 0;
	init_control.vcpu        = cpu;
	return HYPERVISOR_event_channel_op(EVTCHNOP_init_control, &init_control);
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 92 | 98.92% | 1 | 50.00% | 
| julien grall | julien grall | 1 | 1.08% | 1 | 50.00% | 
 | Total | 93 | 100.00% | 2 | 100.00% | 
/*
 * Free event array pages beyond those successfully registered with
 * Xen (used to roll back a partially-completed expansion).
 */
static void free_unused_array_pages(void)
{
	unsigned i = event_array_pages;

	while (i < MAX_EVENT_ARRAY_PAGES && event_array[i]) {
		free_page((unsigned long)event_array[i]);
		event_array[i] = NULL;
		i++;
	}
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 54 | 100.00% | 1 | 100.00% | 
 | Total | 54 | 100.00% | 1 | 100.00% | 
/* Initialize a fresh event array page: every word masked, nothing
 * pending or linked. */
static void init_array_page(event_word_t *array_page)
{
	event_word_t *w = array_page;
	const event_word_t *end = array_page + EVENT_WORDS_PER_PAGE;

	while (w < end)
		*w++ = 1 << EVTCHN_FIFO_MASKED;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 35 | 100.00% | 1 | 100.00% | 
 | Total | 35 | 100.00% | 1 | 100.00% | 
/*
 * Make sure the event array covers @info's event channel, expanding
 * it one page at a time via EVTCHNOP_expand_array as needed.
 *
 * Returns 0 on success, -EINVAL if the port exceeds the ABI maximum,
 * -ENOMEM if a page cannot be allocated, or the hypercall's error.
 * Panics if not even the first page can be registered, since no
 * events could ever be delivered in that case.
 */
static int evtchn_fifo_setup(struct irq_info *info)
{
	unsigned port = info->evtchn;
	unsigned new_array_pages;
	int ret;

	/* Pages needed so that 'port' has an event word. */
	new_array_pages = port / EVENT_WORDS_PER_PAGE + 1;

	if (new_array_pages > MAX_EVENT_ARRAY_PAGES)
		return -EINVAL;

	while (event_array_pages < new_array_pages) {
		void *array_page;
		struct evtchn_expand_array expand_array;

		/* Might already have a page if we've resumed. */
		array_page = event_array[event_array_pages];
		if (!array_page) {
			array_page = (void *)__get_free_page(GFP_KERNEL);
			if (array_page == NULL) {
				ret = -ENOMEM;
				goto error;
			}
			event_array[event_array_pages] = array_page;
		}

		/* Mask all events in this page before adding it. */
		init_array_page(array_page);

		expand_array.array_gfn = virt_to_gfn(array_page);

		ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
		if (ret < 0)
			goto error;

		/* Only count the page once Xen has accepted it. */
		event_array_pages++;
	}
	return 0;

  error:
	if (event_array_pages == 0)
		panic("xen: unable to expand event array with initial page (%d)\n", ret);
	else
		pr_err("unable to expand event array (%d)\n", ret);
	free_unused_array_pages();
	return ret;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 168 | 95.45% | 1 | 33.33% | 
| wei yongjun | wei yongjun | 7 | 3.98% | 1 | 33.33% | 
| julien grall | julien grall | 1 | 0.57% | 1 | 33.33% | 
 | Total | 176 | 100.00% | 3 | 100.00% | 
/* Intentional no-op: the FIFO ABI requires no per-event bookkeeping
 * here when an event channel is rebound to another CPU. */
static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
{
	/* no-op */
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 15 | 100.00% | 1 | 100.00% | 
 | Total | 15 | 100.00% | 1 | 100.00% | 
/* Atomically clear the PENDING bit in @port's shared event word. */
static void evtchn_fifo_clear_pending(unsigned port)
{
	event_word_t *w = event_word_from_port(port);

	sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, w), BM(w));
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 27 | 81.82% | 1 | 50.00% | 
| vladimir murzin | vladimir murzin | 6 | 18.18% | 1 | 50.00% | 
 | Total | 33 | 100.00% | 2 | 100.00% | 
/* Atomically set the PENDING bit in @port's shared event word. */
static void evtchn_fifo_set_pending(unsigned port)
{
	event_word_t *w = event_word_from_port(port);

	sync_set_bit(EVTCHN_FIFO_BIT(PENDING, w), BM(w));
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 27 | 81.82% | 1 | 50.00% | 
| vladimir murzin | vladimir murzin | 6 | 18.18% | 1 | 50.00% | 
 | Total | 33 | 100.00% | 2 | 100.00% | 
/* Report whether @port's shared event word has PENDING set. */
static bool evtchn_fifo_is_pending(unsigned port)
{
	event_word_t *w = event_word_from_port(port);

	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, w), BM(w));
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 28 | 82.35% | 1 | 50.00% | 
| vladimir murzin | vladimir murzin | 6 | 17.65% | 1 | 50.00% | 
 | Total | 34 | 100.00% | 2 | 100.00% | 
/* Atomically set MASKED in @port's event word, returning its old value. */
static bool evtchn_fifo_test_and_set_mask(unsigned port)
{
	event_word_t *w = event_word_from_port(port);

	return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, w), BM(w));
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 28 | 82.35% | 1 | 50.00% | 
| vladimir murzin | vladimir murzin | 6 | 17.65% | 1 | 50.00% | 
 | Total | 34 | 100.00% | 2 | 100.00% | 
/* Atomically set the MASKED bit in @port's shared event word. */
static void evtchn_fifo_mask(unsigned port)
{
	event_word_t *w = event_word_from_port(port);

	sync_set_bit(EVTCHN_FIFO_BIT(MASKED, w), BM(w));
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 27 | 81.82% | 1 | 50.00% | 
| vladimir murzin | vladimir murzin | 6 | 18.18% | 1 | 50.00% | 
 | Total | 33 | 100.00% | 2 | 100.00% | 
/* Report whether @port's shared event word has MASKED set. */
static bool evtchn_fifo_is_masked(unsigned port)
{
	event_word_t *w = event_word_from_port(port);

	return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, w), BM(w));
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| vladimir murzin | vladimir murzin | 34 | 100.00% | 1 | 100.00% | 
 | Total | 34 | 100.00% | 1 | 100.00% | 
/*
 * Clear MASKED, spinning if BUSY is set.
 *
 * The expected ("old") cmpxchg value always has BUSY cleared, so the
 * swap can only succeed once the word is observed with BUSY clear;
 * until then the loop retries with the freshly read value.
 */
static void clear_masked(volatile event_word_t *word)
{
	event_word_t new, old, w;
	w = *word;
	do {
		old = w & ~(1 << EVTCHN_FIFO_BUSY);
		new = old & ~(1 << EVTCHN_FIFO_MASKED);
		w = sync_cmpxchg(word, old, new);
	} while (w != old);
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 66 | 100.00% | 1 | 100.00% | 
 | Total | 66 | 100.00% | 1 | 100.00% | 
/*
 * Unmask @port.  If an event is already pending, ask Xen (via
 * EVTCHNOP_unmask) to relink it so it gets delivered.  Must be called
 * with interrupts disabled.
 */
static void evtchn_fifo_unmask(unsigned port)
{
	event_word_t *word = event_word_from_port(port);
	BUG_ON(!irqs_disabled());
	clear_masked(word);
	if (evtchn_fifo_is_pending(port)) {
		struct evtchn_unmask unmask = { .port = port };
		/* Best effort: the hypercall's result is deliberately ignored. */
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	}
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 59 | 96.72% | 1 | 50.00% | 
| vladimir murzin | vladimir murzin | 2 | 3.28% | 1 | 50.00% | 
 | Total | 61 | 100.00% | 2 | 100.00% | 
/*
 * Atomically clear the LINKED bit and the link field of @word,
 * returning the previous link value (the next port in the queue,
 * or 0 — see consume_one_event(), which treats 0 as "queue empty").
 */
static uint32_t clear_linked(volatile event_word_t *word)
{
	event_word_t new, old, w;
	w = *word;
	do {
		old = w;
		new = (w & ~((1 << EVTCHN_FIFO_LINKED)
			     | EVTCHN_FIFO_LINK_MASK));
	} while ((w = sync_cmpxchg(word, old, new)) != old);
	return w & EVTCHN_FIFO_LINK_MASK;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 70 | 100.00% | 1 | 100.00% | 
 | Total | 70 | 100.00% | 1 | 100.00% | 
/* Dispatch the IRQ bound to @port, if any is registered. */
static void handle_irq_for_port(unsigned port)
{
	int irq = get_evtchn_to_irq(port);

	if (irq != -1)
		generic_handle_irq(irq);
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 30 | 96.77% | 1 | 50.00% | 
| thomas gleixner | thomas gleixner | 1 | 3.23% | 1 | 50.00% | 
 | Total | 31 | 100.00% | 2 | 100.00% | 
/*
 * Pop and handle one event from the @priority queue on @cpu.
 *
 * @ready is the caller's local copy of the control block's READY
 * word; this priority's bit is cleared from it when its queue drains.
 * When @drop is true a pending event is discarded with a warning
 * instead of being dispatched (used when a CPU has gone offline).
 */
static void consume_one_event(unsigned cpu,
			      struct evtchn_fifo_control_block *control_block,
			      unsigned priority, unsigned long *ready,
			      bool drop)
{
	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
	uint32_t head;
	unsigned port;
	event_word_t *word;
	/*
	 * Reached the tail last time?  Read the new HEAD from the
	 * control block.
	 */
	head = q->head[priority];
	if (head == 0) {
		virt_rmb(); /* Ensure word is up-to-date before reading head. */
		head = control_block->head[priority];
	}
	port = head;
	word = event_word_from_port(port);
	/* Unlink the event; its old link is the new queue head. */
	head = clear_linked(word);
	/*
	 * If the link is non-zero, there are more events in the
	 * queue, otherwise the queue is empty.
	 *
	 * If the queue is empty, clear this priority from our local
	 * copy of the ready word.
	 */
	if (head == 0)
		clear_bit(priority, ready);
	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
		if (unlikely(drop))
			pr_warn("Dropping pending event for port %u\n", port);
		else
			handle_irq_for_port(port);
	}
	/* Remember where to resume on the next call. */
	q->head[priority] = head;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 128 | 82.58% | 1 | 25.00% | 
| ross lagerwall | ross lagerwall | 20 | 12.90% | 1 | 25.00% | 
| vladimir murzin | vladimir murzin | 6 | 3.87% | 1 | 25.00% | 
| michael s. tsirkin | michael s. tsirkin | 1 | 0.65% | 1 | 25.00% | 
 | Total | 155 | 100.00% | 4 | 100.00% | 
/*
 * Drain all ready queues on @cpu.  The control block's READY word is
 * atomically swapped to zero, and re-fetched after each event so that
 * priorities made ready while processing are not missed.  With @drop
 * set, pending events are discarded instead of dispatched.
 */
static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
{
	struct evtchn_fifo_control_block *control_block;
	unsigned long ready;
	unsigned q;
	control_block = per_cpu(cpu_control_block, cpu);
	ready = xchg(&control_block->ready, 0);
	while (ready) {
		/* Lowest bit index == highest priority queue. */
		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
		consume_one_event(cpu, control_block, q, &ready, drop);
		ready |= xchg(&control_block->ready, 0);
	}
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 79 | 90.80% | 1 | 33.33% | 
| ross lagerwall | ross lagerwall | 6 | 6.90% | 1 | 33.33% | 
| vladimir murzin | vladimir murzin | 2 | 2.30% | 1 | 33.33% | 
 | Total | 87 | 100.00% | 3 | 100.00% | 
/* Normal event-delivery entry point (evtchn_ops->handle_events):
 * handle, never drop. */
static void evtchn_fifo_handle_events(unsigned cpu)
{
	__evtchn_fifo_handle_events(cpu, false);
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| ross lagerwall | ross lagerwall | 16 | 100.00% | 1 | 100.00% | 
 | Total | 16 | 100.00% | 1 | 100.00% | 
/*
 * After resume, re-register each online CPU's control block with Xen
 * and free the blocks of offline CPUs.  BUG()s if re-registration
 * fails, since events could no longer be delivered to that CPU.
 */
static void evtchn_fifo_resume(void)
{
	unsigned cpu;
	for_each_possible_cpu(cpu) {
		void *control_block = per_cpu(cpu_control_block, cpu);
		int ret;
		if (!control_block)
			continue;
		/*
		 * If this CPU is offline, take the opportunity to
		 * free the control block while it is not being
		 * used.
		 */
		if (!cpu_online(cpu)) {
			free_page((unsigned long)control_block);
			per_cpu(cpu_control_block, cpu) = NULL;
			continue;
		}
		ret = init_control_block(cpu, control_block);
		if (ret < 0)
			BUG();
	}
	/*
	 * The event array starts out as empty again and is extended
	 * as normal when events are bound.  The existing pages will
	 * be reused.
	 */
	event_array_pages = 0;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 90 | 100.00% | 2 | 100.00% | 
 | Total | 90 | 100.00% | 2 | 100.00% | 
/* FIFO-ABI implementation of the generic event channel operations. */
static const struct evtchn_ops evtchn_ops_fifo = {
	.max_channels      = evtchn_fifo_max_channels,
	.nr_channels       = evtchn_fifo_nr_channels,
	.setup             = evtchn_fifo_setup,
	.bind_to_cpu       = evtchn_fifo_bind_to_cpu,
	.clear_pending     = evtchn_fifo_clear_pending,
	.set_pending       = evtchn_fifo_set_pending,
	.is_pending        = evtchn_fifo_is_pending,
	.test_and_set_mask = evtchn_fifo_test_and_set_mask,
	.mask              = evtchn_fifo_mask,
	.unmask            = evtchn_fifo_unmask,
	.handle_events     = evtchn_fifo_handle_events,
	.resume            = evtchn_fifo_resume,
};
static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
	void *control_block = NULL;
	int ret = -ENOMEM;
	control_block = (void *)__get_free_page(GFP_KERNEL);
	if (control_block == NULL)
		goto error;
	ret = init_control_block(cpu, control_block);
	if (ret < 0)
		goto error;
	per_cpu(cpu_control_block, cpu) = control_block;
	return 0;
  error:
	free_page((unsigned long)control_block);
	return ret;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 85 | 100.00% | 2 | 100.00% | 
 | Total | 85 | 100.00% | 2 | 100.00% | 
/*
 * CPU hotplug callback: allocate a control block for a CPU that is
 * coming up (unless it already has one from a previous hotplug), and
 * drop any events still queued on a CPU that has died.
 */
static int evtchn_fifo_cpu_notification(struct notifier_block *self,
						  unsigned long action,
						  void *hcpu)
{
	int cpu = (long)hcpu;
	int ret = 0;

	if (action == CPU_UP_PREPARE) {
		if (!per_cpu(cpu_control_block, cpu))
			ret = evtchn_fifo_alloc_control_block(cpu);
	} else if (action == CPU_DEAD) {
		__evtchn_fifo_handle_events(cpu, true);
	}

	return ret < 0 ? NOTIFY_BAD : NOTIFY_OK;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 70 | 86.42% | 2 | 66.67% | 
| ross lagerwall | ross lagerwall | 11 | 13.58% | 1 | 33.33% | 
 | Total | 81 | 100.00% | 3 | 100.00% | 
/* Hotplug notifier: prepares control blocks for new CPUs and drains
 * events from dead ones. */
static struct notifier_block evtchn_fifo_cpu_notifier = {
	.notifier_call	= evtchn_fifo_cpu_notification,
};
/*
 * Try to initialize the FIFO-based ABI: set up the current CPU's
 * control block and, on success, install evtchn_ops_fifo and register
 * the hotplug notifier.  Returns 0 on success or a negative error
 * from control block setup.
 */
int __init xen_evtchn_fifo_init(void)
{
	int cpu = get_cpu();
	int ret;
	ret = evtchn_fifo_alloc_control_block(cpu);
	if (ret < 0)
		goto out;
	pr_info("Using FIFO-based ABI\n");
	evtchn_ops = &evtchn_ops_fifo;
	register_cpu_notifier(&evtchn_fifo_cpu_notifier);
out:
	put_cpu();
	return ret;
}
Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 57 | 100.00% | 2 | 100.00% | 
 | Total | 57 | 100.00% | 2 | 100.00% | 
Overall Contributors
 | Person | Tokens | Prop | Commits | CommitProp | 
| david vrabel | david vrabel | 1480 | 89.16% | 3 | 27.27% | 
| vladimir murzin | vladimir murzin | 109 | 6.57% | 1 | 9.09% | 
| ross lagerwall | ross lagerwall | 53 | 3.19% | 1 | 9.09% | 
| wei yongjun | wei yongjun | 7 | 0.42% | 1 | 9.09% | 
| julien grall | julien grall | 6 | 0.36% | 3 | 27.27% | 
| michael s. tsirkin | michael s. tsirkin | 4 | 0.24% | 1 | 9.09% | 
| thomas gleixner | thomas gleixner | 1 | 0.06% | 1 | 9.09% | 
| paul gortmaker | paul gortmaker |  | 0.00% | 0 | 0.00% | 
 | Total | 1660 | 100.00% | 11 | 100.00% | 
  
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.