Linux 4.14: arch/um/drivers/port_kern.c
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
* Licensed under the GPL
*/
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <init.h>
#include <irq_kern.h>
#include <os.h>
#include "port.h"
struct port_list {
struct list_head list;
atomic_t wait_count;
int has_connection;
struct completion done;
int port;
int fd;
spinlock_t lock;
struct list_head pending;
struct list_head connections;
};
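/*
 * Per-open-console state: the port being listened on, plus the pids of
 * the helper and telnetd processes serving the current connection
 * (-1 when there is none).
 */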
struct port_dev {
struct port_list *port;
int helper_pid;
int telnetd_pid;
};
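/*
 * One of these per incoming telnet connection.  socket[] is the
 * socketpair over which the helper process passes back the
 * connection's console fd.
 */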
struct connection {
struct list_head list;
int fd;
int helper_pid;
int socket[2];
int telnetd_pid;
struct port_list *port;
};
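/*
 * Fired when the helper writes the new console fd to the socketpair.
 * Note the deliberate fall-through on errors other than -EAGAIN: the
 * connection is still moved to ->connections with a negative fd, and
 * port_wait() throws it away there, because the IRQ can't be freed
 * from inside this handler.
 */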
static irqreturn_t pipe_interrupt(int irq, void *data)
{
struct connection *conn = data;
int fd;
fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
if (fd < 0) {
if (fd == -EAGAIN)
return IRQ_NONE;
printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
-fd);
os_close_file(conn->fd);
}
list_del(&conn->list);
conn->fd = fd;
list_add(&conn->list, &conn->port->connections);
complete(&conn->port->done);
return IRQ_HANDLED;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 109 | 93.97% | 4 | 80.00% |
Paolo 'Blaisorblade' Giarrusso | 7 | 6.03% | 1 | 20.00% |
Total | 116 | 100.00% | 5 | 100.00% |
#define NO_WAITER_MSG \
"****\n" \
"There are currently no UML consoles waiting for port connections.\n" \
"Either disconnect from one to make it available or activate some more\n" \
"by enabling more consoles in the UML /etc/inittab.\n" \
"****\n"
static int port_accept(struct port_list *port)
{
struct connection *conn;
int fd, socket[2], pid;
fd = port_connection(port->fd, socket, &pid);
if (fd < 0) {
if (fd != -EAGAIN)
printk(KERN_ERR "port_accept : port_connection "
"returned %d\n", -fd);
goto out;
}
conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
if (conn == NULL) {
printk(KERN_ERR "port_accept : failed to allocate "
"connection\n");
goto out_close;
}
*conn = ((struct connection)
{ .list = LIST_HEAD_INIT(conn->list),
.fd = fd,
.socket = { socket[0], socket[1] },
.telnetd_pid = pid,
.port = port });
if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
IRQF_SHARED, "telnetd", conn)) {
printk(KERN_ERR "port_accept : failed to get IRQ for "
"telnetd\n");
goto out_free;
}
if (atomic_read(&port->wait_count) == 0) {
os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
printk(KERN_ERR "No one waiting for port\n");
}
list_add(&conn->list, &port->pending);
return 1;
out_free:
kfree(conn);
out_close:
os_close_file(fd);
os_kill_process(pid, 1);
out:
return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 255 | 98.84% | 9 | 81.82% |
Paolo 'Blaisorblade' Giarrusso | 2 | 0.78% | 1 | 9.09% |
Thomas Gleixner | 1 | 0.39% | 1 | 9.09% |
Total | 258 | 100.00% | 11 | 100.00% |
static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);
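/*
 * Runs via the shared workqueue; walks every port that port_interrupt()
 * flagged and accepts connections until its listening fd would block.
 * Interrupts are disabled so has_connection can't change underneath us.
 */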
static void port_work_proc(struct work_struct *unused)
{
struct port_list *port;
struct list_head *ele;
unsigned long flags;
local_irq_save(flags);
list_for_each(ele, &ports) {
port = list_entry(ele, struct port_list, list);
if (!port->has_connection)
continue;
reactivate_fd(port->fd, ACCEPT_IRQ);
while (port_accept(port))
;
port->has_connection = 0;
}
local_irq_restore(flags);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 84 | 97.67% | 2 | 66.67% |
David Howells | 2 | 2.33% | 1 | 33.33% |
Total | 86 | 100.00% | 3 | 100.00% |
DECLARE_WORK(port_work, port_work_proc);
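/*
 * ACCEPT_IRQ handler: just flag the port as having a connection and
 * kick off port_work, which does the actual accepting.
 */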
static irqreturn_t port_interrupt(int irq, void *data)
{
struct port_list *port = data;
port->has_connection = 1;
schedule_work(&port_work);
return IRQ_HANDLED;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 31 | 88.57% | 1 | 50.00% |
Paolo 'Blaisorblade' Giarrusso | 4 | 11.43% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
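/*
 * Called at console open time: find (or create) the port_list for
 * port_num, start listening on it, and hand back a fresh port_dev.
 * Returns NULL if allocation or setup fails.
 */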
void *port_data(int port_num)
{
struct list_head *ele;
struct port_list *port;
struct port_dev *dev = NULL;
int fd;
mutex_lock(&ports_mutex);
list_for_each(ele, &ports) {
port = list_entry(ele, struct port_list, list);
if (port->port == port_num)
goto found;
}
port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
if (port == NULL) {
printk(KERN_ERR "Allocation of port list failed\n");
goto out;
}
fd = port_listen_fd(port_num);
if (fd < 0) {
printk(KERN_ERR "binding to port %d failed, errno = %d\n",
port_num, -fd);
goto out_free;
}
if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
IRQF_SHARED, "port", port)) {
printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
goto out_close;
}
*port = ((struct port_list)
{ .list = LIST_HEAD_INIT(port->list),
.wait_count = ATOMIC_INIT(0),
.has_connection = 0,
.port = port_num,
.fd = fd,
.pending = LIST_HEAD_INIT(port->pending),
.connections = LIST_HEAD_INIT(port->connections) });
spin_lock_init(&port->lock);
init_completion(&port->done);
list_add(&port->list, &ports);
found:
dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
if (dev == NULL) {
printk(KERN_ERR "Allocation of port device entry failed\n");
goto out;
}
*dev = ((struct port_dev) { .port = port,
.helper_pid = -1,
.telnetd_pid = -1 });
goto out;
out_close:
os_close_file(fd);
out_free:
kfree(port);
out:
mutex_unlock(&ports_mutex);
return dev;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 321 | 96.11% | 7 | 70.00% |
Domen Puncer | 8 | 2.40% | 1 | 10.00% |
Daniel Walker | 4 | 1.20% | 1 | 10.00% |
Thomas Gleixner | 1 | 0.30% | 1 | 10.00% |
Total | 334 | 100.00% | 10 | 100.00% |
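/*
 * Block until a connection with a usable console fd arrives on this
 * device's port, then return that fd.  Returns -ERESTARTSYS if the
 * wait is interrupted by a signal.
 */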
int port_wait(void *data)
{
struct port_dev *dev = data;
struct connection *conn;
struct port_list *port = dev->port;
int fd;
atomic_inc(&port->wait_count);
while (1) {
fd = -ERESTARTSYS;
if (wait_for_completion_interruptible(&port->done))
goto out;
spin_lock(&port->lock);
conn = list_entry(port->connections.next, struct connection,
list);
list_del(&conn->list);
spin_unlock(&port->lock);
os_shutdown_socket(conn->socket[0], 1, 1);
os_close_file(conn->socket[0]);
os_shutdown_socket(conn->socket[1], 1, 1);
os_close_file(conn->socket[1]);
/*
 * This is done here because freeing an IRQ can't be done
 * within the IRQ handler.  So, pipe_interrupt always
 * completes port->done regardless of whether it got a
 * successful connection.  Then we loop here throwing out
 * failed connections until a good one is found.
 */
um_free_irq(TELNETD_IRQ, conn);
if (conn->fd >= 0)
break;
os_close_file(conn->fd);
kfree(conn);
}
fd = conn->fd;
dev->helper_pid = conn->helper_pid;
dev->telnetd_pid = conn->telnetd_pid;
kfree(conn);
out:
atomic_dec(&port->wait_count);
return fd;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 221 | 99.55% | 5 | 83.33% |
Richard Weinberger | 1 | 0.45% | 1 | 16.67% |
Total | 222 | 100.00% | 6 | 100.00% |
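/*
 * Kill off the helper and telnetd processes attached to a device and
 * mark it as having none.
 */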
void port_remove_dev(void *d)
{
struct port_dev *dev = d;
if (dev->helper_pid != -1)
os_kill_process(dev->helper_pid, 0);
if (dev->telnetd_pid != -1)
os_kill_process(dev->telnetd_pid, 1);
dev->helper_pid = -1;
dev->telnetd_pid = -1;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 66 | 100.00% | 4 | 100.00% |
Total | 66 | 100.00% | 4 | 100.00% |
void port_kern_free(void *d)
{
struct port_dev *dev = d;
port_remove_dev(dev);
kfree(dev);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 26 | 100.00% | 2 | 100.00% |
Total | 26 | 100.00% | 2 | 100.00% |
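/*
 * UML exitcall: release each listening port's IRQ and close its fd at
 * shutdown.
 */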
static void free_port(void)
{
struct list_head *ele;
struct port_list *port;
list_for_each(ele, &ports) {
port = list_entry(ele, struct port_list, list);
free_irq_by_fd(port->fd);
os_close_file(port->fd);
}
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 52 | 100.00% | 1 | 100.00% |
Total | 52 | 100.00% | 1 | 100.00% |
__uml_exitcall(free_port);
Overall Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jeff Dike | 1287 | 96.12% | 16 | 64.00% |
Paolo 'Blaisorblade' Giarrusso | 17 | 1.27% | 1 | 4.00% |
Al Viro | 10 | 0.75% | 1 | 4.00% |
Domen Puncer | 8 | 0.60% | 1 | 4.00% |
Daniel Walker | 8 | 0.60% | 1 | 4.00% |
Thomas Gleixner | 2 | 0.15% | 1 | 4.00% |
Alexey Dobriyan | 2 | 0.15% | 1 | 4.00% |
Tejun Heo | 2 | 0.15% | 1 | 4.00% |
David Howells | 2 | 0.15% | 1 | 4.00% |
Richard Weinberger | 1 | 0.07% | 1 | 4.00% |
Total | 1339 | 100.00% | 25 | 100.00% |