Release 4.11 net/bridge/br_netlink.c
/*
* Bridge netlink control interface
*
* Authors:
* Stephen Hemminger <shemminger@osdl.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>
#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"
/* Count how many bridge_vlan_info entries a compressed (range-folded)
 * vlan dump of @vg would emit: adjacent vids with identical flags
 * collapse into a begin/end pair (2 entries), lone vids into 1.
 * Must be called under rcu_read_lock(). Returns 0 unless
 * RTEXT_FILTER_BRVLAN_COMPRESSED was requested.
 */
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	u16 range_start = 0, range_end = 0, range_flags = 0;
	struct net_bridge_vlan *v;
	int count = 0;
	u16 flags, pvid;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;

		flags = 0;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		/* extend the open range when this vid is adjacent and
		 * carries the same flags
		 */
		if (range_start != 0 && (v->vid - range_end) == 1 &&
		    flags == range_flags) {
			range_end = v->vid;
			continue;
		}

		/* close the previous range, if any, and start a new one */
		if (range_start != 0)
			count += (range_end > range_start) ? 2 : 1;

		range_start = v->vid;
		range_end = v->vid;
		range_flags = flags;
	}

	/* account for the final, still-open range */
	if (range_start != 0)
		count += (range_end > range_start) ? 2 : 1;

	return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roopa Prabhu | 170 | 78.34% | 1 | 25.00% |
Nikolay Aleksandrov | 47 | 21.66% | 3 | 75.00% |
Total | 217 | 100.00% | 4 | 100.00% |
/* Number of vlan infos a dump of @vg will carry for the given filter.
 * NULL @vg means no vlans configured. Uncompressed dumps emit exactly
 * one entry per vlan; compressed dumps are counted under RCU.
 */
static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int n;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	n = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return n;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 54 | 100.00% | 2 | 100.00% |
Total | 54 | 100.00% | 2 | 100.00% |
/* Size (in bytes) of the IFLA_AF_SPEC payload for @dev given the
 * requested @filter_mask: vlan tunnel infos (when the port has
 * BR_VLAN_TUNNEL) plus one bridge_vlan_info attribute per vlan entry.
 * Works for both bridge ports and the bridge master device.
 */
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	/* vg may be NULL when the port has no vlan group yet; the
	 * tunnel size helper dereferences it, so guard against that.
	 */
	if (p && vg && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roopa Prabhu | 101 | 71.63% | 3 | 50.00% |
Nikolay Aleksandrov | 33 | 23.40% | 2 | 33.33% |
Johannes Berg | 7 | 4.96% | 1 | 16.67% |
Total | 141 | 100.00% | 6 | 100.00% |
/* Upper bound on the netlink space needed by br_port_fill_attrs().
 * Every attribute emitted there must be accounted for here, otherwise
 * br_nlmsg_size() undersizes the skb and notifications fail with
 * -EMSGSIZE.
 */
static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE  */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 124 | 60.49% | 9 | 50.00% |
Stephen Hemminger | 54 | 26.34% | 4 | 22.22% |
Vlad Yasevich | 12 | 5.85% | 2 | 11.11% |
Felix Fietkau | 6 | 2.93% | 1 | 5.56% |
Roopa Prabhu | 6 | 2.93% | 1 | 5.56% |
Nicolas Dichtel | 3 | 1.46% | 1 | 5.56% |
Total | 205 | 100.00% | 18 | 100.00% |
/* Worst-case size of one RTM_NEWLINK message for a bridge port,
 * including the per-port IFLA_PROTINFO nest and the vlan IFLA_AF_SPEC
 * payload selected by @filter_mask.
 */
static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	size_t size = NLMSG_ALIGN(sizeof(struct ifinfomsg));

	size += nla_total_size(IFNAMSIZ);	/* IFLA_IFNAME */
	size += nla_total_size(MAX_ADDR_LEN);	/* IFLA_ADDRESS */
	size += nla_total_size(4);		/* IFLA_MASTER */
	size += nla_total_size(4);		/* IFLA_MTU */
	size += nla_total_size(4);		/* IFLA_LINK */
	size += nla_total_size(1);		/* IFLA_OPERSTATE */
	size += nla_total_size(br_port_info_size());	/* IFLA_PROTINFO */
	/* IFLA_AF_SPEC, sized for the requested vlan filter */
	size += nla_total_size(br_get_link_af_size_filtered(dev, filter_mask));

	return size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 58 | 73.42% | 1 | 25.00% |
Roopa Prabhu | 19 | 24.05% | 2 | 50.00% |
Stephen Hemminger | 2 | 2.53% | 1 | 25.00% |
Total | 79 | 100.00% | 4 | 100.00% |
/* Fill the per-port IFLA_BRPORT_* attributes for @p into @skb.
 * Any attribute that fails to fit aborts the whole fill.
 * Returns 0 on success or -EMSGSIZE when the skb ran out of tailroom.
 * br_port_info_size() must account for everything emitted here.
 */
static int br_port_fill_attrs(struct sk_buff *skb,
const struct net_bridge_port *p)
{
u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
u64 timerval;
/* STP state, flag bits (each exported as a 0/1 u8), and the STP
 * designated root/bridge identifiers, short-circuited so the first
 * failing put stops the chain
 */
if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
nla_put_u8(skb, IFLA_BRPORT_PROTECT,
!!(p->flags & BR_ROOT_BLOCK)) ||
nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
!!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
!!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
!!(p->flags & BR_FLOOD)) ||
nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
!!(p->flags & BR_MCAST_FLOOD)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
!!(p->flags & BR_PROXYARP_WIFI)) ||
nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
&p->designated_root) ||
nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
&p->designated_bridge) ||
nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
p->topology_change_ack) ||
nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
BR_VLAN_TUNNEL)))
return -EMSGSIZE;
/* remaining STP timer values, exported as 64-bit with explicit pad
 * attribute for alignment
 */
timerval = br_timer_value(&p->message_age_timer);
if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
timerval = br_timer_value(&p->forward_delay_timer);
if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
timerval = br_timer_value(&p->hold_timer);
if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
IFLA_BRPORT_PAD))
return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
p->multicast_router))
return -EMSGSIZE;
#endif
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nikolay Aleksandrov | 216 | 47.06% | 8 | 42.11% |
Stephen Hemminger | 115 | 25.05% | 3 | 15.79% |
Vlad Yasevich | 34 | 7.41% | 2 | 10.53% |
Felix Fietkau | 17 | 3.70% | 1 | 5.26% |
Jouni Malinen | 17 | 3.70% | 1 | 5.26% |
Kyeyoon Park | 17 | 3.70% | 1 | 5.26% |
David S. Miller | 17 | 3.70% | 1 | 5.26% |
Roopa Prabhu | 17 | 3.70% | 1 | 5.26% |
Nicolas Dichtel | 9 | 1.96% | 1 | 5.26% |
Total | 459 | 100.00% | 19 | 100.00% |
/* Emit one vlan id range [@vid_start, @vid_end] with @flags into @skb.
 * A real range becomes two IFLA_BRIDGE_VLAN_INFO attributes marked
 * RANGE_BEGIN/RANGE_END; a single vid becomes one plain attribute.
 * Returns 0 on success, -EMSGSIZE on netlink put failure.
 */
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if (vid_end > vid_start) {
		/* range start entry, then fall through to the end entry */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
	} else {
		/* degenerate range: a single vid */
		vinfo.vid = vid_start;
		vinfo.flags = flags;
	}

	if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO, sizeof(vinfo), &vinfo))
		return -EMSGSIZE;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roopa Prabhu | 149 | 100.00% | 1 | 100.00% |
Total | 149 | 100.00% | 1 | 100.00% |
/* Dump @vg's vlans into @skb in compressed form: consecutive vids with
 * identical flags are folded into begin/end range pairs. Mirrors the
 * counting logic in __get_num_vlan_infos(); the two must stay in sync
 * or the precomputed message size will be wrong.
 * Caller holds rcu_read_lock(). Returns 0 or a negative errno.
 */
static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
struct net_bridge_vlan_group *vg)
{
struct net_bridge_vlan *v;
/* current open range: [vid_range_start, vid_range_end] with
 * vid_range_flags; vid_range_start == 0 means no range is open
 * (vid 0 is never a valid bridge vlan)
 */
u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
u16 flags, pvid;
int err = 0;
/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
 * and mark vlan info with begin and end flags
 * if vlaninfo represents a range
 */
pvid = br_get_pvid(vg);
list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
flags = 0;
if (!br_vlan_should_use(v))
continue;
if (v->vid == pvid)
flags |= BRIDGE_VLAN_INFO_PVID;
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
flags |= BRIDGE_VLAN_INFO_UNTAGGED;
if (vid_range_start == 0) {
/* first usable vlan: just open a range */
goto initvars;
} else if ((v->vid - vid_range_end) == 1 &&
flags == vid_range_flags) {
/* adjacent vid with identical flags: extend the range */
vid_range_end = v->vid;
continue;
} else {
/* range broken: flush it, then open a new one below */
err = br_fill_ifvlaninfo_range(skb, vid_range_start,
vid_range_end,
vid_range_flags);
if (err)
return err;
}
initvars:
vid_range_start = v->vid;
vid_range_end = v->vid;
vid_range_flags = flags;
}
if (vid_range_start != 0) {
/* Call it once more to send any left over vlans */
err = br_fill_ifvlaninfo_range(skb, vid_range_start,
vid_range_end,
vid_range_flags);
if (err)
return err;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roopa Prabhu | 165 | 78.95% | 2 | 40.00% |
Nikolay Aleksandrov | 44 | 21.05% | 3 | 60.00% |
Total | 209 | 100.00% | 5 | 100.00% |
/* Dump @vg's vlans into @skb uncompressed: one IFLA_BRIDGE_VLAN_INFO
 * attribute per active vlan, flagged with PVID/UNTAGGED as applicable.
 * Caller holds rcu_read_lock(). Returns 0 or -EMSGSIZE.
 */
static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 pvid = br_get_pvid(vg);

	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		struct bridge_vlan_info vinfo;

		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			return -EMSGSIZE;
	}

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roopa Prabhu | 88 | 70.40% | 1 | 25.00% |
Nikolay Aleksandrov | 37 | 29.60% | 3 | 75.00% |
Total | 125 | 100.00% | 4 | 100.00% |
/*
* Create one netlink message for one interface
* Contains port and master info as well as carrier and bridge state.
*/
/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	/* @dev is either a bridge port (then @port is set) or the
	 * bridge master itself
	 */
	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);
		/* don't let a successful tunnel-info fill overwrite an
		 * error from the vlan fill above, or a truncated message
		 * would be sent out silently
		 */
		if (!err && port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 238 | 46.85% | 3 | 20.00% |
Vlad Yasevich | 105 | 20.67% | 3 | 20.00% |
Roopa Prabhu | 48 | 9.45% | 2 | 13.33% |
Thomas Graf | 46 | 9.06% | 1 | 6.67% |
Nikolay Aleksandrov | 34 | 6.69% | 2 | 13.33% |
David S. Miller | 23 | 4.53% | 1 | 6.67% |
Nicolas Dichtel | 6 | 1.18% | 1 | 6.67% |
Patrick McHardy | 5 | 0.98% | 1 | 6.67% |
Johannes Berg | 3 | 0.59% | 1 | 6.67% |
Total | 508 | 100.00% | 15 | 100.00% |
/*
* Notify listeners of a change in port information
*/
/*
 * Notify listeners of a change in port information
 */
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
	struct sk_buff *skb;
	struct net *net;
	int err;

	if (!port)
		return;

	net = dev_net(port->dev);
	br_debug(port->br, "port %u(%s) event %d\n",
		 (unsigned int)port->port_no, port->dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 97 | 55.75% | 3 | 21.43% |
Patrick McHardy | 20 | 11.49% | 1 | 7.14% |
Vlad Yasevich | 20 | 11.49% | 2 | 14.29% |
Thomas Graf | 13 | 7.47% | 2 | 14.29% |
Roopa Prabhu | 13 | 7.47% | 2 | 14.29% |
Alexey Dobriyan | 7 | 4.02% | 1 | 7.14% |
Denis V. Lunev | 2 | 1.15% | 1 | 7.14% |
Eric Dumazet | 1 | 0.57% | 1 | 7.14% |
Pablo Neira Ayuso | 1 | 0.57% | 1 | 7.14% |
Total | 174 | 100.00% | 14 | 100.00% |
/*
* Dump information about all ports, in response to GETLINK
*/
/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	/* nothing to dump for a non-port device unless vlan info was
	 * explicitly requested
	 */
	if (!port &&
	    !(filter_mask & (RTEXT_FILTER_BRVLAN |
			     RTEXT_FILTER_BRVLAN_COMPRESSED)))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 36 | 45.57% | 2 | 20.00% |
Vlad Yasevich | 14 | 17.72% | 1 | 10.00% |
John Fastabend | 11 | 13.92% | 1 | 10.00% |
Roopa Prabhu | 7 | 8.86% | 1 | 10.00% |
Nicolas Dichtel | 4 | 5.06% | 1 | 10.00% |
Dan Carpenter | 4 | 5.06% | 1 | 10.00% |
Denis V. Lunev | 1 | 1.27% | 1 | 10.00% |
Jiri Pirko | 1 | 1.27% | 1 | 10.00% |
Hong Zhi Guo | 1 | 1.27% | 1 | 10.00% |
Total | 79 | 100.00% | 10 | 100.00% |
/* Apply one vlan add (RTM_SETLINK) or delete (RTM_DELLINK) for @vinfo,
 * either on port @p or, when @p is NULL, on the bridge @br itself.
 * Unknown commands are a silent no-op. Returns 0 or a negative errno.
 */
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo)
{
	int err = 0;

	if (cmd == RTM_SETLINK) {
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
		}
	} else if (cmd == RTM_DELLINK) {
		if (p) {
			nbp_vlan_delete(p, vinfo->vid);
			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
				br_vlan_delete(p->br, vinfo->vid);
		} else {
			br_vlan_delete(br, vinfo->vid);
		}
	}

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 122 | 85.31% | 2 | 50.00% |
Roopa Prabhu | 15 | 10.49% | 1 | 25.00% |
Nikolay Aleksandrov | 6 | 4.20% | 1 | 25.00% |
Total | 143 | 100.00% | 4 | 100.00% |
/* Process one bridge_vlan_info attribute from a SETLINK/DELLINK
 * request. Range support is stateful across calls: a RANGE_BEGIN
 * entry is remembered via *@vinfo_last and applied to every vid once
 * the matching RANGE_END arrives; entries outside a range are applied
 * immediately. Returns 0 or a negative errno on malformed input.
 */
static int br_process_vlan_info(struct net_bridge *br,
struct net_bridge_port *p, int cmd,
struct bridge_vlan_info *vinfo_curr,
struct bridge_vlan_info **vinfo_last)
{
/* vid 0 and vids >= VLAN_VID_MASK (4095) are invalid */
if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
return -EINVAL;
if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
/* check if we are already processing a range */
if (*vinfo_last)
return -EINVAL;
*vinfo_last = vinfo_curr;
/* don't allow range of pvids */
if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
return -EINVAL;
/* nothing applied yet; wait for the RANGE_END entry */
return 0;
}
if (*vinfo_last) {
struct bridge_vlan_info tmp_vinfo;
int v, err;
/* a pending range must be closed by a RANGE_END entry whose
 * vid is strictly greater than the range start
 */
if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
return -EINVAL;
if (vinfo_curr->vid <= (*vinfo_last)->vid)
return -EINVAL;
/* apply the RANGE_BEGIN entry's flags to every vid in the
 * range, one vid at a time
 */
memcpy(&tmp_vinfo, *vinfo_last,
sizeof(struct bridge_vlan_info));
for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
tmp_vinfo.vid = v;
err = br_vlan_info(br, p, cmd, &tmp_vinfo);
if (err)
break;
}
*vinfo_last = NULL;
/* NOTE(review): per-vid errors are not propagated here — the
 * loop stops but 0 is returned; looks intentional (best-effort
 * range apply), confirm before changing
 */
return 0;
}
/* plain single-vid entry */
return br_vlan_info(br, p, cmd, vinfo_curr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roopa Prabhu | 192 | 86.88% | 2 | 40.00% |
Nikolay Aleksandrov | 27 | 12.22% | 2 | 40.00% |
Vlad Yasevich | 2 | 0.90% | 1 | 20.00% |
Total | 221 | 100.00% | 5 | 100.00% |
static int br_afspec(struct net_bridge *br,
struct net_bridge_port *p,
struct nlattr *af_spec,
int cmd)
{
struct bridge_vlan_info *vinfo_curr = NULL;
struct bridge_vlan_info *vinfo_last = NULL;
struct nlattr *attr;
struct vtunnel_info tinfo_last = {};
struct vtunnel_info tinfo_curr = {};
int err = 0, rem;
nla_for_each_nested(attr, af_spec, rem) {
err = 0;
switch (nla_type(attr)) {
case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
if (!(p->flags & BR_VLAN_TUNNEL))
return -EINVAL;
err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
if (err)
return err;
err = br_process_vlan_tunnel_info(br, p, cmd,
&tinfo_curr,
&tinfo_last);
if (err)
return