/* Release 4.11 - drivers/staging/greybus/gpio.c */
/*
* GPIO Greybus driver.
*
* Copyright 2014 Google Inc.
* Copyright 2014 Linaro Ltd.
*
* Released under the GPLv2 only.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mutex.h>
#include "greybus.h"
#include "gbphy.h"
/* Per-line cached state for one GPIO line on the remote module. */
struct gb_gpio_line {
/* The following has to be an array of line_max entries */
/* --> make them just a flags field */
u8 active: 1,			/* line has been activated on the module */
direction: 1, /* 0 = output, 1 = input */
value: 1; /* 0 = low, 1 = high */
u16 debounce_usec;		/* last debounce period sent to the module */
u8 irq_type;			/* GB_GPIO_IRQ_TYPE_* pending/applied trigger */
bool irq_type_pending;		/* irq_type must be flushed in bus_sync_unlock */
bool masked;			/* requested irq mask state */
bool masked_pending;		/* masked must be flushed in bus_sync_unlock */
};
/*
 * Driver state for one Greybus GPIO controller: the connection to the
 * module, cached per-line state, and the gpiolib/irqchip plumbing.
 */
struct gb_gpio_controller {
struct gbphy_device *gbphy_dev;
struct gb_connection *connection;
u8 line_max; /* max line number */
struct gb_gpio_line *lines;	/* array of line_max + 1 entries */
struct gpio_chip chip;
struct irq_chip irqc;
struct irq_chip *irqchip;
struct irq_domain *irqdomain;
unsigned int irq_base;		/* Linux irq mapped for hwirq 0 */
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
struct mutex irq_lock;		/* serializes slow-bus irqchip updates */
};
/*
 * Convert a gpio_chip pointer back to its containing controller.
 * The macro parameter is deliberately named differently from the
 * struct member: with the old spelling (parameter also called "chip")
 * the member argument of container_of() was macro-substituted too, so
 * the macro only worked when the caller's variable happened to be
 * named "chip".
 */
#define gpio_chip_to_gb_gpio_controller(gc) \
	container_of(gc, struct gb_gpio_controller, chip)
/* Fetch the gpio_chip stored as host_data of the irq domain. */
#define irq_data_to_gpio_chip(d) ((d)->domain->host_data)
/*
 * Ask the module for its highest valid GPIO line number and cache it
 * in ggc->line_max. Returns 0 on success or a negative errno.
 */
static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
	struct gb_gpio_line_count_response response;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
				NULL, 0, &response, sizeof(response));
	if (ret)
		return ret;

	/* NOTE(review): "count" appears to carry the max line number. */
	ggc->line_max = response.count;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 45 | 78.95% | 1 | 50.00% |
Greg Kroah-Hartman | 12 | 21.05% | 1 | 50.00% |
Total | 57 | 100.00% | 2 | 100.00% |
/*
 * Activate a GPIO line on the module so it can be used. Holds a
 * runtime-PM reference on success; the reference is dropped again on
 * failure (and later by the matching deactivate).
 */
static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
{
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	struct gb_gpio_activate_request request = { .which = which };
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		gbphy_runtime_put_autosuspend(gbphy_dev);
		return ret;
	}

	ggc->lines[which].active = true;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 50 | 49.50% | 2 | 50.00% |
Axel Haslam | 34 | 33.66% | 1 | 25.00% |
Greg Kroah-Hartman | 17 | 16.83% | 1 | 25.00% |
Total | 101 | 100.00% | 4 | 100.00% |
/*
 * Deactivate a GPIO line on the module and drop the runtime-PM
 * reference taken at activation time. Failure is only logged.
 */
static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
					 u8 which)
{
	struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
	struct gb_gpio_deactivate_request request = { .which = which };
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
				&request, sizeof(request), NULL, 0);
	if (!ret)
		ggc->lines[which].active = false;
	else
		dev_err(&gbphy_dev->dev, "failed to deactivate gpio %u\n",
			which);

	/* The PM reference is released whether or not the request worked. */
	gbphy_runtime_put_autosuspend(gbphy_dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 47 | 44.76% | 2 | 25.00% |
Greg Kroah-Hartman | 26 | 24.76% | 2 | 25.00% |
Axel Haslam | 19 | 18.10% | 1 | 12.50% |
Johan Hovold | 12 | 11.43% | 2 | 25.00% |
Sandeep Patil | 1 | 0.95% | 1 | 12.50% |
Total | 105 | 100.00% | 8 | 100.00% |
/*
 * Query the module for a line's direction and cache the result
 * (0 = output, 1 = input). Returns 0 on success or a negative errno.
 */
static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
					   u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_direction_request request = { .which = which };
	struct gb_gpio_get_direction_response response;
	u8 direction;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret)
		return ret;

	direction = response.direction;
	if (direction > 1) {
		dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
			 which, direction);
	}

	/* Normalize to a single bit before caching. */
	ggc->lines[which].direction = direction ? 1 : 0;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 86 | 68.80% | 3 | 42.86% |
Greg Kroah-Hartman | 33 | 26.40% | 2 | 28.57% |
Johan Hovold | 5 | 4.00% | 1 | 14.29% |
Sandeep Patil | 1 | 0.80% | 1 | 14.29% |
Total | 125 | 100.00% | 7 | 100.00% |
/* Switch a line to input mode on the module and update the cache. */
static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
					  u8 which)
{
	struct gb_gpio_direction_in_request request = { .which = which };
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
				&request, sizeof(request), NULL, 0);
	if (ret)
		return ret;

	ggc->lines[which].direction = 1;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 52 | 75.36% | 2 | 66.67% |
Greg Kroah-Hartman | 17 | 24.64% | 1 | 33.33% |
Total | 69 | 100.00% | 3 | 100.00% |
/*
 * Switch a line to output mode with the given initial level and
 * update the cached direction.
 */
static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
					   u8 which, bool value_high)
{
	struct gb_gpio_direction_out_request request = {
		.which = which,
		.value = value_high ? 1 : 0,
	};
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
				&request, sizeof(request), NULL, 0);
	if (ret)
		return ret;

	ggc->lines[which].direction = 0;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 61 | 74.39% | 1 | 50.00% |
Greg Kroah-Hartman | 21 | 25.61% | 1 | 50.00% |
Total | 82 | 100.00% | 2 | 100.00% |
/*
 * Read a line's current level from the module and cache it
 * (0 = low, 1 = high). Returns 0 on success or a negative errno.
 */
static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
				       u8 which)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_get_value_request request = { .which = which };
	struct gb_gpio_get_value_response response;
	u8 value;
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(dev, "failed to get value of gpio %u\n", which);
		return ret;
	}

	value = response.value;
	if (value > 1) {
		dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
			 which, value);
	}

	/* Normalize to a single bit before caching. */
	ggc->lines[which].value = value ? 1 : 0;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 86 | 63.24% | 3 | 37.50% |
Greg Kroah-Hartman | 33 | 24.26% | 2 | 25.00% |
Johan Hovold | 16 | 11.76% | 2 | 25.00% |
Sandeep Patil | 1 | 0.74% | 1 | 12.50% |
Total | 136 | 100.00% | 8 | 100.00% |
/*
 * Drive an output line to the requested level. Refuses to act on a
 * line whose cached direction is input; failures are only logged.
 */
static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
					u8 which, bool value_high)
{
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_gpio_set_value_request request = {
		.which = which,
		.value = value_high ? 1 : 0,
	};
	int ret;

	if (ggc->lines[which].direction == 1) {
		dev_warn(dev, "refusing to set value of input gpio %u\n",
			 which);
		return;
	}

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(dev, "failed to set value of gpio %u\n", which);
		return;
	}

	/* Remember the level we just wrote. */
	ggc->lines[which].value = request.value;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 57 | 44.19% | 1 | 14.29% |
Johan Hovold | 38 | 29.46% | 3 | 42.86% |
Greg Kroah-Hartman | 33 | 25.58% | 2 | 28.57% |
Sandeep Patil | 1 | 0.78% | 1 | 14.29% |
Total | 129 | 100.00% | 7 | 100.00% |
/*
 * Program a line's debounce period (in microseconds) on the module
 * and cache the value on success.
 */
static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
					  u8 which, u16 debounce_usec)
{
	struct gb_gpio_set_debounce_request request = {
		.which = which,
		.usec = cpu_to_le16(debounce_usec),
	};
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		return ret;

	ggc->lines[which].debounce_usec = debounce_usec;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 58 | 71.60% | 1 | 33.33% |
Greg Kroah-Hartman | 23 | 28.40% | 2 | 66.67% |
Total | 81 | 100.00% | 3 | 100.00% |
/*
 * Send the actual IRQ-mask request to the module. Called from
 * irq_bus_sync_unlock context, where sleeping is allowed.
 */
static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct gb_gpio_irq_mask_request request = { .which = hwirq };
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_IRQ_MASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(&ggc->gbphy_dev->dev,
			"failed to mask irq: %d\n", ret);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 34 | 45.33% | 1 | 16.67% |
Alex Elder | 19 | 25.33% | 1 | 16.67% |
Greg Kroah-Hartman | 11 | 14.67% | 1 | 16.67% |
Johan Hovold | 10 | 13.33% | 2 | 33.33% |
Sandeep Patil | 1 | 1.33% | 1 | 16.67% |
Total | 75 | 100.00% | 6 | 100.00% |
/*
 * Send the actual IRQ-unmask request to the module. Called from
 * irq_bus_sync_unlock context, where sleeping is allowed.
 */
static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
{
	struct gb_gpio_irq_unmask_request request = { .which = hwirq };
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_IRQ_UNMASK,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(&ggc->gbphy_dev->dev,
			"failed to unmask irq: %d\n", ret);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hovold | 53 | 70.67% | 1 | 20.00% |
Greg Kroah-Hartman | 11 | 14.67% | 1 | 20.00% |
Alex Elder | 7 | 9.33% | 1 | 20.00% |
Matt Porter | 3 | 4.00% | 1 | 20.00% |
Sandeep Patil | 1 | 1.33% | 1 | 20.00% |
Total | 75 | 100.00% | 5 | 100.00% |
/*
 * Send the actual trigger-type request to the module. Called from
 * irq_bus_sync_unlock context, where sleeping is allowed.
 */
static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
				  u8 hwirq, u8 type)
{
	struct gb_gpio_irq_type_request request = {
		.which = hwirq,
		.type = type,
	};
	int ret;

	ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_IRQ_TYPE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		dev_err(&ggc->gbphy_dev->dev,
			"failed to set irq type: %d\n", ret);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 29 | 34.52% | 1 | 16.67% |
Johan Hovold | 24 | 28.57% | 2 | 33.33% |
Alex Elder | 19 | 22.62% | 1 | 16.67% |
Greg Kroah-Hartman | 11 | 13.10% | 1 | 16.67% |
Sandeep Patil | 1 | 1.19% | 1 | 16.67% |
Total | 84 | 100.00% | 6 | 100.00% |
/*
 * irqchip .irq_mask: runs with the bus lock held and irqs off, so only
 * record the request; it is flushed in gb_gpio_irq_bus_sync_unlock().
 */
static void gb_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	ggc->lines[d->hwirq].masked = true;
	ggc->lines[d->hwirq].masked_pending = true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hovold | 58 | 100.00% | 1 | 100.00% |
Total | 58 | 100.00% | 1 | 100.00% |
/*
 * irqchip .irq_unmask: runs with the bus lock held and irqs off, so only
 * record the request; it is flushed in gb_gpio_irq_bus_sync_unlock().
 */
static void gb_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	ggc->lines[d->hwirq].masked = false;
	ggc->lines[d->hwirq].masked_pending = true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hovold | 58 | 100.00% | 1 | 100.00% |
Total | 58 | 100.00% | 1 | 100.00% |
/*
 * irqchip .irq_set_type: translate the generic trigger type into the
 * Greybus encoding and record it; the request itself is sent later
 * from gb_gpio_irq_bus_sync_unlock(). Returns -EINVAL for triggers the
 * protocol cannot express.
 */
static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
	struct device *dev = &ggc->gbphy_dev->dev;
	u8 gb_type;

	switch (type) {
	case IRQ_TYPE_NONE:
		gb_type = GB_GPIO_IRQ_TYPE_NONE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gb_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		gb_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		gb_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gb_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		gb_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	line->irq_type = gb_type;
	line->irq_type_pending = true;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hovold | 69 | 46.62% | 2 | 33.33% |
Matt Porter | 46 | 31.08% | 1 | 16.67% |
Alex Elder | 21 | 14.19% | 1 | 16.67% |
Greg Kroah-Hartman | 11 | 7.43% | 1 | 16.67% |
Sandeep Patil | 1 | 0.68% | 1 | 16.67% |
Total | 148 | 100.00% | 6 | 100.00% |
static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
struct gpio_chip *chip = irq_data_to_gpio_chip(d);
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
mutex_lock(&ggc->irq_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hovold | 37 | 94.87% | 1 | 50.00% |
Alex Elder | 2 | 5.13% | 1 | 50.00% |
Total | 39 | 100.00% | 2 | 100.00% |
/*
 * irqchip .irq_bus_sync_unlock: flush any pending trigger-type and
 * mask-state changes to the module (we may sleep here), then release
 * the bus lock taken in gb_gpio_irq_bus_lock().
 */
static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	/* Apply the trigger type first, then the mask state. */
	if (line->irq_type_pending) {
		line->irq_type_pending = false;
		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
	}

	if (line->masked_pending) {
		line->masked_pending = false;
		if (line->masked)
			_gb_gpio_irq_mask(ggc, d->hwirq);
		else
			_gb_gpio_irq_unmask(ggc, d->hwirq);
	}

	mutex_unlock(&ggc->irq_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hovold | 106 | 88.33% | 2 | 50.00% |
Matt Porter | 10 | 8.33% | 1 | 25.00% |
Alex Elder | 4 | 3.33% | 1 | 25.00% |
Total | 120 | 100.00% | 4 | 100.00% |
/*
 * Handler for unsolicited requests from the module. The only request
 * type handled is GB_GPIO_TYPE_IRQ_EVENT: validate the payload, map
 * the reported line to its Linux IRQ and dispatch it.
 *
 * Returns 0 on success or -EINVAL for malformed/unexpected requests.
 */
static int gb_gpio_request_handler(struct gb_operation *op)
{
struct gb_connection *connection = op->connection;
struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
struct device *dev = &ggc->gbphy_dev->dev;
struct gb_message *request;
struct gb_gpio_irq_event_request *event;
u8 type = op->type;
int irq;
struct irq_desc *desc;
if (type != GB_GPIO_TYPE_IRQ_EVENT) {
dev_err(dev, "unsupported unsolicited request: %u\n", type);
return -EINVAL;
}
request = op->request;
/* Payload must be large enough to contain the event structure. */
if (request->payload_size < sizeof(*event)) {
dev_err(dev, "short event received (%zu < %zu)\n",
request->payload_size, sizeof(*event));
return -EINVAL;
}
event = request->payload;
/* line_max is the highest valid line number, so > is out of range. */
if (event->which > ggc->line_max) {
dev_err(dev, "invalid hw irq: %d\n", event->which);
return -EINVAL;
}
irq = irq_find_mapping(ggc->irqdomain, event->which);
if (!irq) {
dev_err(dev, "failed to find IRQ\n");
return -EINVAL;
}
desc = irq_to_desc(irq);
if (!desc) {
dev_err(dev, "failed to look up irq\n");
return -EINVAL;
}
/*
 * We arrive here in (sleepable) request context; disable local
 * interrupts so the flow handler runs as if in hard-irq context.
 */
local_irq_disable();
generic_handle_irq_desc(desc);
local_irq_enable();
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hovold | 98 | 42.24% | 6 | 46.15% |
Matt Porter | 81 | 34.91% | 1 | 7.69% |
Greg Kroah-Hartman | 27 | 11.64% | 3 | 23.08% |
Alex Elder | 14 | 6.03% | 1 | 7.69% |
Viresh Kumar | 11 | 4.74% | 1 | 7.69% |
Sandeep Patil | 1 | 0.43% | 1 | 7.69% |
Total | 232 | 100.00% | 13 | 100.00% |
/* gpiolib .request: activate the line on the remote module. */
static int gb_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_activate_operation(ggc, (u8)offset);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 23 | 63.89% | 1 | 20.00% |
Matt Porter | 9 | 25.00% | 1 | 20.00% |
Johan Hovold | 3 | 8.33% | 2 | 40.00% |
Roman Sommer | 1 | 2.78% | 1 | 20.00% |
Total | 36 | 100.00% | 5 | 100.00% |
/* gpiolib .free: deactivate the line on the remote module. */
static void gb_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_deactivate_operation(ggc, (u8)offset);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Elder | 25 | 71.43% | 1 | 16.67% |
Matt Porter | 6 | 17.14% | 2 | 33.33% |
Johan Hovold | 2 | 5.71% | 1 | 16.67% |
Greg Kroah-Hartman | 1 | 2.86% | 1 | 16.67% |
Roman Sommer | 1 | 2.86% | 1 | 16.67% |
Total | 35 | 100.00% | 6 | 100.00% |
static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
u8 which;
int ret;
which = (u8)offset;
ret = gb_gpio_get_direction_operation(ggc, which);
if (ret)
return ret;
return ggc->lines[which].direction ? 1 : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 34 | 50.00% | 1 | 16.67% |
Alex Elder | 19 | 27.94% | 1 | 16.67% |
Greg Kroah-Hartman | 8 | 11.76% | 1 | 16.67% |
Johan Hovold | 6 | 8.82% | 2 | 33.33% |
Roman Sommer | 1 | 1.47% | 1 | 16.67% |
Total | 68 | 100.00% | 6 | 100.00% |
/* gpiolib .direction_input: switch the line to input mode. */
static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_in_operation(ggc, (u8)offset);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 30 | 83.33% | 1 | 20.00% |
Johan Hovold | 3 | 8.33% | 2 | 40.00% |
Alex Elder | 2 | 5.56% | 1 | 20.00% |
Roman Sommer | 1 | 2.78% | 1 | 20.00% |
Total | 36 | 100.00% | 5 | 100.00% |
/* gpiolib .direction_output: switch to output with an initial level. */
static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
				    int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 39 | 90.70% | 1 | 25.00% |
Johan Hovold | 3 | 6.98% | 2 | 50.00% |
Roman Sommer | 1 | 2.33% | 1 | 25.00% |
Total | 43 | 100.00% | 4 | 100.00% |
static int gb_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
u8 which;
int ret;
which = (u8)offset;
ret = gb_gpio_get_value_operation(ggc, which);
if (ret)
return ret;
return ggc->lines[which].value;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 60 | 93.75% | 1 | 33.33% |
Johan Hovold | 3 | 4.69% | 1 | 33.33% |
Roman Sommer | 1 | 1.56% | 1 | 33.33% |
Total | 64 | 100.00% | 3 | 100.00% |
/* gpiolib .set: drive an output line to the requested level. */
static void gb_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 39 | 92.86% | 1 | 33.33% |
Johan Hovold | 2 | 4.76% | 1 | 33.33% |
Roman Sommer | 1 | 2.38% | 1 | 33.33% |
Total | 42 | 100.00% | 3 | 100.00% |
/*
 * gpiolib .set_config: only input debounce is supported; the period
 * must fit in the protocol's 16-bit microsecond field.
 */
static int gb_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
			      unsigned long config)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	u32 usec;

	if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
		return -ENOTSUPP;

	usec = pinconf_to_config_argument(config);
	if (usec > U16_MAX)
		return -EINVAL;

	return gb_gpio_set_debounce_operation(ggc, (u8)offset, (u16)usec);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 45 | 57.69% | 1 | 20.00% |
Mika Westerberg | 29 | 37.18% | 1 | 20.00% |
Johan Hovold | 3 | 3.85% | 2 | 40.00% |
Roman Sommer | 1 | 1.28% | 1 | 20.00% |
Total | 78 | 100.00% | 5 | 100.00% |
/*
 * Query the number of lines the module exposes and allocate the
 * per-line state array. Returns 0 on success or a negative errno.
 */
static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
{
	int ret;

	/* Now find out how many lines there are */
	ret = gb_gpio_line_count_operation(ggc);
	if (ret)
		return ret;

	/* line_max is the highest line number, so there are line_max + 1. */
	ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
			     GFP_KERNEL);
	if (!ggc->lines)
		return -ENOMEM;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 52 | 78.79% | 1 | 25.00% |
Johan Hovold | 14 | 21.21% | 3 | 75.00% |
Total | 66 | 100.00% | 4 | 100.00% |
/**
 * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
 * @domain: the irqdomain used by this irqchip
 * @irq: the global irq number used by this GB gpio irqchip irq
 * @hwirq: the local IRQ/GPIO line offset on this GB gpio
 *
 * This function will set up the mapping for a certain IRQ line on a
 * GB gpio by assigning the GB gpio as chip data, and using the irqchip
 * stored inside the GB gpio.
 */
static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
struct gpio_chip *chip = domain->host_data;
struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
irq_set_chip_data(irq, ggc);
irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
irq_set_noprobe(irq);
/*
 * No set-up of the hardware will happen if IRQ_TYPE_NONE
 * is passed as default type.
 */
if (ggc->irq_default_type != IRQ_TYPE_NONE)
irq_set_irq_type(irq, ggc->irq_default_type);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 83 | 100.00% | 1 | 100.00% |
Total | 83 | 100.00% | 1 | 100.00% |
/* Undo gb_gpio_irq_map(): clear the handler first, then the chip data. */
static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
/* irq_domain callbacks used by the per-controller simple domain. */
static const struct irq_domain_ops gb_gpio_domain_ops = {
.map = gb_gpio_irq_map,
.unmap = gb_gpio_irq_unmap,
};
/**
* gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
* @ggc: the gb_gpio_controller to remove the irqchip from
*
* This is called only from gb_gpio_remove()
*/
/*
 * Tear down what gb_gpio_irqchip_add() set up: dispose of every IRQ
 * mapping, delete the domain, and clear the irqchip pointer. Pointers
 * are NULLed afterwards so a second call is a harmless no-op.
 */
static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
{
	unsigned int offset;

	/* Remove all IRQ mappings and delete the domain */
	if (ggc->irqdomain) {
		for (offset = 0; offset < (ggc->line_max + 1); offset++)
			irq_dispose_mapping(irq_find_mapping(ggc->irqdomain,
							     offset));
		irq_domain_remove(ggc->irqdomain);
		ggc->irqdomain = NULL;
	}

	/* No need to test first: assigning NULL is safe either way. */
	ggc->irqchip = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 74 | 100.00% | 1 | 100.00% |
Total | 74 | 100.00% | 1 | 100.00% |
/**
* gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
* @chip: the gpio chip to add the irqchip to
* @irqchip: the irqchip to add to the adapter
* @first_irq: if not dynamically assigned, the base (first) IRQ to
* allocate gpio irqs from
* @handler: the irq handler to use (often a predefined irq core function)
* @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
* to have the core avoid setting up any default type in the hardware.
*
* This function closely associates a certain irqchip with a certain
* gpio chip, providing an irq domain to translate the local IRQs to
* global irqs, and making sure that the gpio chip
* is passed as chip data to all related functions. Driver callbacks
* need to use container_of() to get their local state containers back
* from the gpio chip passed as chip data. An irqdomain will be stored
* in the gpio chip that shall be used by the driver to handle IRQ number
* translation. The gpio chip will need to be initialized and registered
* before calling this function.
*/
static int gb_gpio_irqchip_add(struct gpio_chip *chip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
struct gb_gpio_controller *ggc;
unsigned int offset;
unsigned int irq_base;
if (!chip || !irqchip)
return -EINVAL;
ggc = gpio_chip_to_gb_gpio_controller(chip);
ggc->irqchip = irqchip;
ggc->irq_handler = handler;
ggc->irq_default_type = type;
/* One hwirq per line; the gpio_chip doubles as the domain host data. */
ggc->irqdomain = irq_domain_add_simple(NULL,
ggc->line_max + 1, first_irq,
&gb_gpio_domain_ops, chip);
if (!ggc->irqdomain) {
ggc->irqchip = NULL;
return -EINVAL;
}
/*
 * Prepare the mapping since the irqchip shall be orthogonal to
 * any gpio calls. If the first_irq was zero, this is
 * necessary to allocate descriptors for all IRQs.
 */
for (offset = 0; offset < (ggc->line_max + 1); offset++) {
/* NOTE(review): a failed (zero) mapping is not detected here. */
irq_base = irq_create_mapping(ggc->irqdomain, offset);
/* Remember the Linux irq of hwirq 0 as the base. */
if (offset == 0)
ggc->irq_base = irq_base;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 146 | 87.95% | 2 | 40.00% |
Alex Elder | 15 | 9.04% | 1 | 20.00% |
Greg Kroah-Hartman | 4 | 2.41% | 1 | 20.00% |
Roman Sommer | 1 | 0.60% | 1 | 20.00% |
Total | 166 | 100.00% | 5 | 100.00% |
/* gpiolib .to_irq: translate a GPIO offset into its mapped Linux IRQ. */
static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	return irq_find_mapping(ggc->irqdomain, offset);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Kroah-Hartman | 13 | 37.14% | 1 | 25.00% |
Alex Elder | 11 | 31.43% | 1 | 25.00% |
Matt Porter | 10 | 28.57% | 1 | 25.00% |
Roman Sommer | 1 | 2.86% | 1 | 25.00% |
Total | 35 | 100.00% | 4 | 100.00% |
/*
 * Probe: create and enable the Greybus connection, discover the line
 * count, wire up the irqchip and gpio_chip callbacks, and register
 * with gpiolib. On failure everything is unwound in reverse order via
 * the goto ladder at the bottom.
 */
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
const struct gbphy_device_id *id)
{
struct gb_connection *connection;
struct gb_gpio_controller *ggc;
struct gpio_chip *gpio;
struct irq_chip *irqc;
int ret;
ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
if (!ggc)
return -ENOMEM;
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
gb_gpio_request_handler);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
goto exit_ggc_free;
}
ggc->connection = connection;
gb_connection_set_data(connection, ggc);
ggc->gbphy_dev = gbphy_dev;
gb_gbphy_set_data(gbphy_dev, ggc);
/* Enable TX only first; RX (irq events) is enabled further below. */
ret = gb_connection_enable_tx(connection);
if (ret)
goto exit_connection_destroy;
ret = gb_gpio_controller_setup(ggc);
if (ret)
goto exit_connection_disable;
/* Populate the irq_chip embedded in the controller. */
irqc = &ggc->irqc;
irqc->irq_mask = gb_gpio_irq_mask;
irqc->irq_unmask = gb_gpio_irq_unmask;
irqc->irq_set_type = gb_gpio_irq_set_type;
irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
irqc->name = "greybus_gpio";
mutex_init(&ggc->irq_lock);
/* Populate the gpio_chip; all accessors sleep (bus traffic). */
gpio = &ggc->chip;
gpio->label = "greybus_gpio";
gpio->parent = &gbphy_dev->dev;
gpio->owner = THIS_MODULE;
gpio->request = gb_gpio_request;
gpio->free = gb_gpio_free;
gpio->get_direction = gb_gpio_get_direction;
gpio->direction_input = gb_gpio_direction_input;
gpio->direction_output = gb_gpio_direction_output;
gpio->get = gb_gpio_get;
gpio->set = gb_gpio_set;
gpio->set_config = gb_gpio_set_config;
gpio->to_irq = gb_gpio_to_irq;
gpio->base = -1; /* Allocate base dynamically */
gpio->ngpio = ggc->line_max + 1;
gpio->can_sleep = true;
/* Full enable (TX + RX) so irq events can be received. */
ret = gb_connection_enable(connection);
if (ret)
goto exit_line_free;
ret = gb_gpio_irqchip_add(gpio, irqc, 0,
handle_level_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(&gbphy_dev->dev, "failed to add irq chip: %d\n", ret);
goto exit_line_free;
}
ret = gpiochip_add(gpio);
if (ret) {
dev_err(&gbphy_dev->dev, "failed to add gpio chip: %d\n", ret);
goto exit_gpio_irqchip_remove;
}
gbphy_runtime_put_autosuspend(gbphy_dev);
return 0;
exit_gpio_irqchip_remove:
gb_gpio_irqchip_remove(ggc);
exit_line_free:
kfree(ggc->lines);
exit_connection_disable:
gb_connection_disable(connection);
exit_connection_destroy:
gb_connection_destroy(connection);
exit_ggc_free:
kfree(ggc);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Kroah-Hartman | 226 | 51.60% | 7 | 33.33% |
Alex Elder | 65 | 14.84% | 3 | 14.29% |
Matt Porter | 61 | 13.93% | 1 | 4.76% |
Johan Hovold | 51 | 11.64% | 5 | 23.81% |
Viresh Kumar | 18 | 4.11% | 2 | 9.52% |
Sandeep Patil | 10 | 2.28% | 1 | 4.76% |
Axel Haslam | 5 | 1.14% | 1 | 4.76% |
Mika Westerberg | 2 | 0.46% | 1 | 4.76% |
Total | 438 | 100.00% | 21 | 100.00% |
/*
 * Remove: resume the device, stop incoming irq events, then unwind
 * everything probe set up in reverse order.
 */
static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
struct gb_connection *connection = ggc->connection;
int ret;
ret = gbphy_runtime_get_sync(gbphy_dev);
if (ret)
gbphy_runtime_get_noresume(gbphy_dev);
/* Stop RX first so no irq events arrive during teardown. */
gb_connection_disable_rx(connection);
gpiochip_remove(&ggc->chip);
gb_gpio_irqchip_remove(ggc);
gb_connection_disable(connection);
gb_connection_destroy(connection);
kfree(ggc->lines);
kfree(ggc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Greg Kroah-Hartman | 47 | 52.81% | 6 | 46.15% |
Axel Haslam | 19 | 21.35% | 1 | 7.69% |
Johan Hovold | 11 | 12.36% | 2 | 15.38% |
Viresh Kumar | 5 | 5.62% | 1 | 7.69% |
Sandeep Patil | 4 | 4.49% | 1 | 7.69% |
Alex Elder | 3 | 3.37% | 2 | 15.38% |
Total | 89 | 100.00% | 13 | 100.00% |
/* Match table: bind to any gbphy device speaking the GPIO protocol. */
static const struct gbphy_device_id gb_gpio_id_table[] = {
{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
/* gbphy driver glue; module init/exit provided by module_gbphy_driver. */
static struct gbphy_driver gpio_driver = {
.name = "gpio",
.probe = gb_gpio_probe,
.remove = gb_gpio_remove,
.id_table = gb_gpio_id_table,
};
module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Porter | 982 | 29.07% | 2 | 4.17% |
Alex Elder | 855 | 25.31% | 9 | 18.75% |
Johan Hovold | 704 | 20.84% | 15 | 31.25% |
Greg Kroah-Hartman | 643 | 19.03% | 14 | 29.17% |
Axel Haslam | 77 | 2.28% | 1 | 2.08% |
Viresh Kumar | 45 | 1.33% | 4 | 8.33% |
Sandeep Patil | 31 | 0.92% | 1 | 2.08% |
Mika Westerberg | 31 | 0.92% | 1 | 2.08% |
Roman Sommer | 10 | 0.30% | 1 | 2.08% |
Total | 3378 | 100.00% | 48 | 100.00% |
/*
 * Information contained on this website is for historical information
 * purposes only and does not indicate or represent copyright ownership.
 */