Release 4.17 net/tipc/name_distr.c
/*
* net/tipc/name_distr.c: TIPC name distribution code
*
* Copyright (c) 2000-2006, 2014, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "link.h"
#include "name_distr.h"
int sysctl_tipc_named_timeout __read_mostly = 2000;
struct distr_queue_item {
struct distr_item i;
u32 dtype;
u32 node;
unsigned long expires;
struct list_head next;
};
/**
* publ_to_item - add publication info to a publication message
*/
static void publ_to_item(struct distr_item *i, struct publication *p)
{
i->type = htonl(p->type);
i->lower = htonl(p->lower);
i->upper = htonl(p->upper);
i->port = htonl(p->port);
i->key = htonl(p->key);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Per Liden | 69 | 97.18% | 1 | 50.00% |
| Jon Paul Maloy | 2 | 2.82% | 1 | 50.00% |
| Total | 71 | 100.00% | 2 | 100.00% |
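For context, publ_to_item() does nothing more than convert each field of the publication into network byte order. A minimal sketch of the wire format it packs, assuming the struct distr_item definition from net/tipc/name_distr.h (five 32-bit fields, all carried big-endian on the wire, hence the htonl() calls above):
/* Sketch (assumption, for illustration): layout of one packed item */
struct distr_item {
	__be32 type;	/* name type of the publication */
	__be32 lower;	/* lower bound of the published name range */
	__be32 upper;	/* upper bound of the published name range */
	__be32 port;	/* originating port (socket) reference */
	__be32 key;	/* key distinguishing duplicate publications */
};
The receive path in tipc_update_nametbl() below performs the mirror-image ntohl() conversions when unpacking such an item.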
/**
* named_prepare_buf - allocate & initialize a publication message
*
* The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
{
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
u32 self = tipc_own_addr(net);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
type, INT_H_SIZE, dest);
msg_set_size(msg, INT_H_SIZE + size);
}
return buf;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Per Liden | 69 | 75.82% | 1 | 14.29% |
| Jon Paul Maloy | 9 | 9.89% | 1 | 14.29% |
| Ying Xue | 6 | 6.59% | 1 | 14.29% |
| Allan Stephens | 4 | 4.40% | 2 | 28.57% |
| Parthasarathy Bhuvaragan | 2 | 2.20% | 1 | 14.29% |
| Stephen Hemminger | 1 | 1.10% | 1 | 14.29% |
| Total | 91 | 100.00% | 7 | 100.00% |
/**
* tipc_named_publish - tell other nodes about a new publication by this node
*/
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
{
struct name_table *nt = tipc_name_table(net);
struct distr_item *item;
struct sk_buff *skb;
if (publ->scope == TIPC_NODE_SCOPE) {
list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
return NULL;
}
list_add_tail_rcu(&publ->binding_node, &nt->cluster_scope);
skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
if (!skb) {
pr_warn("Publication distribution failure\n");
return NULL;
}
item = (struct distr_item *)msg_data(buf_msg(skb));
publ_to_item(item, publ);
return skb;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Per Liden | 63 | 48.84% | 2 | 18.18% |
| Jon Paul Maloy | 37 | 28.68% | 2 | 18.18% |
| Ying Xue | 27 | 20.93% | 5 | 45.45% |
| Allan Stephens | 1 | 0.78% | 1 | 9.09% |
| Erik Hugne | 1 | 0.78% | 1 | 9.09% |
| Total | 129 | 100.00% | 11 | 100.00% |
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
*/
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
{
struct sk_buff *buf;
struct distr_item *item;
list_del(&publ->binding_node);
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
pr_warn("Withdrawal distribution failure\n");
return NULL;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
publ_to_item(item, publ);
return buf;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Per Liden | 70 | 70.71% | 2 | 25.00% |
| Ying Xue | 18 | 18.18% | 2 | 25.00% |
| Allan Stephens | 8 | 8.08% | 1 | 12.50% |
| Erik Hugne | 1 | 1.01% | 1 | 12.50% |
| Jon Paul Maloy | 1 | 1.01% | 1 | 12.50% |
| Lucas De Marchi | 1 | 1.01% | 1 | 12.50% |
| Total | 99 | 100.00% | 8 | 100.00% |
/**
* named_distribute - prepare name info for bulk distribution to another node
* @list: list of messages (buffers) to be returned from this function
* @net: the associated network namespace
* @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain
*/
static void named_distribute(struct net *net, struct sk_buff_head *list,
u32 dnode, struct list_head *pls)
{
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
list_for_each_entry(publ, pls, binding_node) {
/* Prepare next buffer: */
if (!skb) {
skb = named_prepare_buf(net, PUBLICATION, msg_rem,
dnode);
if (!skb) {
pr_warn("Bulk publication failure\n");
return;
}
msg_set_bc_ack_invalid(buf_msg(skb), true);
item = (struct distr_item *)msg_data(buf_msg(skb));
}
/* Pack publication into message: */
publ_to_item(item, publ);
item++;
msg_rem -= ITEM_SIZE;
/* Append full buffer to list: */
if (!msg_rem) {
__skb_queue_tail(list, skb);
skb = NULL;
msg_rem = msg_dsz;
}
}
if (skb) {
msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
__skb_queue_tail(list, skb);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Per Liden | 81 | 36.99% | 1 | 5.88% |
| Ying Xue | 69 | 31.51% | 5 | 29.41% |
| Jon Paul Maloy | 37 | 16.89% | 3 | 17.65% |
| Allan Stephens | 21 | 9.59% | 4 | 23.53% |
| Parthasarathy Bhuvaragan | 6 | 2.74% | 1 | 5.88% |
| Sam Ravnborg | 2 | 0.91% | 1 | 5.88% |
| Paul Gortmaker | 2 | 0.91% | 1 | 5.88% |
| Erik Hugne | 1 | 0.46% | 1 | 5.88% |
| Total | 219 | 100.00% | 17 | 100.00% |
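The msg_dsz arithmetic at the top of named_distribute() rounds the per-message payload down to a whole number of publication items, so a single item is never split across two buffers. Below is a small standalone sketch of that calculation; the constants are assumptions used only for illustration: INT_H_SIZE is 40 bytes in TIPC, ITEM_SIZE is sizeof(struct distr_item) = 20 bytes, and 1500 stands in for the link MTU returned by tipc_node_get_mtu().
/* Standalone sketch of the payload rounding in named_distribute(). */
#include <stdio.h>

#define HDR_SIZE   40	/* assumed INT_H_SIZE: internal TIPC header */
#define ITEM_SIZE  20	/* assumed sizeof(struct distr_item): 5 x 32-bit fields */

int main(void)
{
	unsigned int mtu = 1500;	/* assumed link MTU for this example */
	unsigned int dsz = ((mtu - HDR_SIZE) / ITEM_SIZE) * ITEM_SIZE;

	/* Integer division rounds down, so no item straddles two buffers:
	 * (1500 - 40) / 20 = 73 items, i.e. 1460 bytes of payload.
	 */
	printf("payload %u bytes, %u items per message\n",
	       dsz, dsz / ITEM_SIZE);
	return 0;
}
With those numbers each full bulk message carries 73 items; the trailing msg_set_size()/skb_trim() in the function shrinks the final, partially filled buffer to the bytes actually used.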
/**
* tipc_named_node_up - tell specified node about all publications by this node
*/
void tipc_named_node_up(struct net *net, u32 dnode)
{
struct name_table *nt = tipc_name_table(net);
struct sk_buff_head head;
__skb_queue_head_init(&head);
rcu_read_lock();
named_distribute(net, &head, dnode, &nt->cluster_scope);
rcu_read_unlock();
tipc_node_xmit(net, &head, dnode, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Ying Xue | 32 | 48.48% | 5 | 38.46% |
| Allan Stephens | 21 | 31.82% | 3 | 23.08% |
| Jon Paul Maloy | 11 | 16.67% | 4 | 30.77% |
| Per Liden | 2 | 3.03% | 1 | 7.69% |
| Total | 66 | 100.00% | 13 | 100.00% |
/**
* tipc_publ_purge - remove publication associated with a failed node
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
*/
static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
{
struct tipc_net *tn = tipc_net(net);
struct publication *p;
spin_lock_bh(&tn->nametbl_lock);
p = tipc_nametbl_remove_publ(net, publ->type, publ->lower, publ->upper,
publ->node, publ->key);
if (p)
tipc_node_unsubscribe(net, &p->binding_node, addr);
spin_unlock_bh(&tn->nametbl_lock);
if (p != publ) {
pr_err("Unable to remove publication from failed node\n"
" (type=%u, lower=%u, node=0x%x, port=%u, key=%u)\n",
publ->type, publ->lower, publ->node, publ->port,
publ->key);
}
kfree_rcu(p, rcu);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Per Liden | 50 | 37.31% | 2 | 15.38% |
| Allan Stephens | 39 | 29.10% | 2 | 15.38% |
| Ying Xue | 35 | 26.12% | 4 | 30.77% |
| Jon Paul Maloy | 9 | 6.72% | 4 | 30.77% |
| Erik Hugne | 1 | 0.75% | 1 | 7.69% |
| Total | 134 | 100.00% | 13 | 100.00% |
/**
* tipc_dist_queue_purge - remove deferred updates from a node that went down
*/
static void tipc_dist_queue_purge(struct net *net, u32 addr)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct distr_queue_item *e, *tmp;
spin_lock_bh(&tn->nametbl_lock);
list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
if (e->node != addr)
continue;
list_del(&e->next);
kfree(e);
}
spin_unlock_bh(&tn->nametbl_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Erik Hugne | 84 | 100.00% | 1 | 100.00% |
| Total | 84 | 100.00% | 1 | 100.00% |
void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
{
struct publication *publ, *tmp;
list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
tipc_publ_purge(net, publ, addr);
tipc_dist_queue_purge(net, addr);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Ying Xue | 44 | 84.62% | 2 | 50.00% |
| Erik Hugne | 7 | 13.46% | 1 | 25.00% |
| Jon Paul Maloy | 1 | 1.92% | 1 | 25.00% |
| Total | 52 | 100.00% | 4 | 100.00% |
/**
* tipc_update_nametbl - try to process a nametable update and notify
* subscribers
*
* tipc_nametbl_lock must be held.
* Returns true if the update was applied, otherwise false.
*/
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
u32 node, u32 dtype)
{
struct publication *p = NULL;
u32 lower = ntohl(i->lower);
u32 upper = ntohl(i->upper);
u32 type = ntohl(i->type);
u32 port = ntohl(i->port);
u32 key = ntohl(i->key);
if (dtype == PUBLICATION) {
p = tipc_nametbl_insert_publ(net, type, lower, upper,
TIPC_CLUSTER_SCOPE, node,
port, key);
if (p) {
tipc_node_subscribe(net, &p->binding_node, node);
return true;
}
} else if (dtype == WITHDRAWAL) {
p = tipc_nametbl_remove_publ(net, type, lower,
upper, node, key);
if (p) {
tipc_node_unsubscribe(net, &p->binding_node, node);
kfree_rcu(p, rcu);
return true;
}
pr_warn_ratelimited("Failed to remove binding %u,%u from %x\n",
type, lower, node);
} else {
pr_warn("Unrecognized name table message received\n");
}
return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Jon Paul Maloy | 76 | 37.07% | 5 | 35.71% |
| Per Liden | 73 | 35.61% | 2 | 14.29% |
| Erik Hugne | 40 | 19.51% | 3 | 21.43% |
| Ying Xue | 16 | 7.80% | 4 | 28.57% |
| Total | 205 | 100.00% | 14 | 100.00% |
/**
* tipc_named_rcv - process name table update messages sent by another node
*/
void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_msg *msg;
struct distr_item *item;
uint count;
u32 node;
struct sk_buff *skb;
int mtype;
spin_lock_bh(&tn->nametbl_lock);
for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
skb_linearize(skb);
msg = buf_msg(skb);
mtype = msg_type(msg);
item = (struct distr_item *)msg_data(msg);
count = msg_data_sz(msg) / ITEM_SIZE;
node = msg_orignode(msg);
while (count--) {
tipc_update_nametbl(net, item, node, mtype);
item++;
}
kfree_skb(skb);
}
spin_unlock_bh(&tn->nametbl_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Jon Paul Maloy | 76 | 47.50% | 2 | 22.22% |
| Erik Hugne | 43 | 26.88% | 2 | 22.22% |
| Ying Xue | 23 | 14.38% | 3 | 33.33% |
| Per Liden | 10 | 6.25% | 1 | 11.11% |
| Allan Stephens | 8 | 5.00% | 1 | 11.11% |
| Total | 160 | 100.00% | 9 | 100.00% |
/**
* tipc_named_reinit - re-initialize local publications
*
* This routine is called whenever TIPC networking is enabled.
* All name table entries published by this node are updated to reflect
* the node's new network address.
*/
void tipc_named_reinit(struct net *net)
{
struct name_table *nt = tipc_name_table(net);
struct tipc_net *tn = tipc_net(net);
struct publication *publ;
u32 self = tipc_own_addr(net);
spin_lock_bh(&tn->nametbl_lock);
list_for_each_entry_rcu(publ, &nt->node_scope, binding_node)
publ->node = self;
list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node)
publ->node = self;
spin_unlock_bh(&tn->nametbl_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Jon Paul Maloy | 35 | 40.23% | 3 | 37.50% |
| Per Liden | 29 | 33.33% | 2 | 25.00% |
| Ying Xue | 22 | 25.29% | 2 | 25.00% |
| Allan Stephens | 1 | 1.15% | 1 | 12.50% |
| Total | 87 | 100.00% | 8 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Per Liden | 528 | 36.49% | 2 | 4.35% |
| Jon Paul Maloy | 295 | 20.39% | 13 | 28.26% |
| Ying Xue | 294 | 20.32% | 10 | 21.74% |
| Erik Hugne | 210 | 14.51% | 5 | 10.87% |
| Allan Stephens | 105 | 7.26% | 10 | 21.74% |
| Parthasarathy Bhuvaragan | 9 | 0.62% | 2 | 4.35% |
| Paul Gortmaker | 2 | 0.14% | 1 | 2.17% |
| Sam Ravnborg | 2 | 0.14% | 1 | 2.17% |
| Stephen Hemminger | 1 | 0.07% | 1 | 2.17% |
| Lucas De Marchi | 1 | 0.07% | 1 | 2.17% |
| Total | 1447 | 100.00% | 46 | 100.00% |