Release 4.11 net/openvswitch/datapath.c
/*
* Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
unsigned int ovs_net_id __read_mostly;
static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;
static const struct nla_policy flow_policy[];
static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
.name = OVS_FLOW_MCGROUP,
};
static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
.name = OVS_DATAPATH_MCGROUP,
};
static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
.name = OVS_VPORT_MCGROUP,
};
/* Check whether we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
unsigned int group)
{
return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
genl_has_listeners(family, genl_info_net(info), group);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jarno Rajahalme | 29 | 70.73% | 1 | 50.00% |
Samuel Gauthier | 12 | 29.27% | 1 | 50.00% |
Total | 41 | 100.00% | 2 | 100.00% |
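For context, here is a hedged sketch of how this gate is used by the command handlers later in this file: reply allocation is skipped when the requester did not set NLM_F_ECHO and nobody subscribes to the multicast group. The helper name and the use of group index 0 (the family's first multicast group) are illustrative assumptions, not code from this file.
/* Illustrative sketch only: avoid building a reply nobody will receive.
 * example_alloc_reply() is a hypothetical helper; group index 0 denotes
 * the family's first multicast group.
 */
static struct sk_buff *example_alloc_reply(struct genl_info *info, size_t len)
{
	if (!ovs_must_notify(&dp_flow_genl_family, info, 0))
		return NULL;

	return genlmsg_new(len, GFP_KERNEL);
}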
static void ovs_notify(struct genl_family *family,
struct sk_buff *skb, struct genl_info *info)
{
genl_notify(family, skb, info, 0, GFP_KERNEL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 26 | 76.47% | 1 | 33.33% |
Johannes Berg | 8 | 23.53% | 2 | 66.67% |
Total | 34 | 100.00% | 3 | 100.00% |
/**
* DOC: Locking:
*
* Writes to device state (add/remove datapath or port, set operations on
* vports, etc.) and writes to other state (flow table modifications,
* setting miscellaneous datapath parameters, etc.) are protected by
* ovs_lock.
*
* Reads are protected by RCU.
*
* There are a few special cases (mostly stats) that have their own
* synchronization but they nest under all of the above and don't interact with
* each other.
*
* The RTNL lock nests inside ovs_mutex.
*/
static DEFINE_MUTEX(ovs_mutex);
void ovs_lock(void)
{
mutex_lock(&ovs_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pravin B Shelar | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
void ovs_unlock(void)
{
mutex_unlock(&ovs_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pravin B Shelar | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
if (debug_locks)
return lockdep_is_held(&ovs_mutex);
else
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pravin B Shelar | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
#endif
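As a hedged illustration of the discipline described in the DOC block above (the function below is hypothetical, not part of this file): writers serialize on ovs_mutex via ovs_lock(), while readers run lockless under RCU using the _rcu accessors from datapath.h.
/* Illustrative sketch only: the write side vs. the read side. */
static void example_locking_pattern(struct datapath *dp)
{
	struct vport *vport;

	ovs_lock();		/* write side: mutate datapath state */
	/* ... add/remove ports, modify the flow table ... */
	ovs_unlock();

	rcu_read_lock();	/* read side: lockless lookup */
	vport = ovs_vport_rcu(dp, OVSP_LOCAL);
	if (vport)
		pr_debug("local port ifindex %d\n", vport->dev->ifindex);
	rcu_read_unlock();
}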
static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
const struct sw_flow_key *,
const struct dp_upcall_info *,
uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
const struct sw_flow_key *,
const struct dp_upcall_info *,
uint32_t cutlen);
/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
if (dev) {
struct vport *vport = ovs_internal_dev_get_vport(dev);
if (vport)
return vport->dp;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 41 | 73.21% | 1 | 33.33% |
Andy Zhou | 9 | 16.07% | 1 | 33.33% |
Pravin B Shelar | 6 | 10.71% | 1 | 33.33% |
Total | 56 | 100.00% | 3 | 100.00% |
/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
* returned dp pointer valid.
*/
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
struct datapath *dp;
WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
rcu_read_lock();
dp = get_dp_rcu(net, dp_ifindex);
rcu_read_unlock();
return dp;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Zhou | 44 | 86.27% | 1 | 50.00% |
Jesse Gross | 7 | 13.73% | 1 | 50.00% |
Total | 51 | 100.00% | 2 | 100.00% |
/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
return ovs_vport_name(vport);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 28 | 90.32% | 1 | 25.00% |
Pravin B Shelar | 2 | 6.45% | 2 | 50.00% |
Thomas Graf | 1 | 3.23% | 1 | 25.00% |
Total | 31 | 100.00% | 4 | 100.00% |
static int get_dpifindex(const struct datapath *dp)
{
struct vport *local;
int ifindex;
rcu_read_lock();
local = ovs_vport_rcu(dp, OVSP_LOCAL);
if (local)
ifindex = local->dev->ifindex;
else
ifindex = 0;
rcu_read_unlock();
return ifindex;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 48 | 87.27% | 1 | 25.00% |
Thomas Graf | 5 | 9.09% | 2 | 50.00% |
Pravin B Shelar | 2 | 3.64% | 1 | 25.00% |
Total | 55 | 100.00% | 4 | 100.00% |
static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
ovs_flow_tbl_destroy(&dp->table);
free_percpu(dp->stats_percpu);
kfree(dp->ports);
kfree(dp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 35 | 66.04% | 1 | 33.33% |
Pravin B Shelar | 18 | 33.96% | 2 | 66.67% |
Total | 53 | 100.00% | 3 | 100.00% |
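destroy_dp_rcu() is the deferred half of an RCU-protected free. Later in this file, __dp_destroy() schedules it after a grace period so that readers still traversing the datapath under rcu_read_lock() never see freed memory:
	/* From __dp_destroy(), later in this file: free only after all
	 * current RCU readers have finished.
	 */
	call_rcu(&dp->rcu, destroy_dp_rcu);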
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
u16 port_no)
{
return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pravin B Shelar | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
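A note on the bucket computation (a worked example, assuming DP_VPORT_HASH_BUCKETS == 1024 as defined in datapath.h): the bitwise AND is a modulo that is only correct because the bucket count is a power of two.
/* Worked example (assuming DP_VPORT_HASH_BUCKETS == 1024):
 *   port_no 1030: 1030 & 1023 ==   6 -> bucket 6
 *   port_no  512:  512 & 1023 == 512 -> bucket 512
 * With a non-power-of-two bucket count the mask would leave some
 * buckets unreachable and skew the distribution.
 */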
/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
struct vport *vport;
struct hlist_head *head;
head = vport_hash_bucket(dp, port_no);
hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
if (vport->port_no == port_no)
return vport;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pravin B Shelar | 56 | 94.92% | 1 | 50.00% |
Jesse Gross | 3 | 5.08% | 1 | 50.00% |
Total | 59 | 100.00% | 2 | 100.00% |
/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
struct vport *vport;
vport = ovs_vport_add(parms);
if (!IS_ERR(vport)) {
struct datapath *dp = parms->dp;
struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
hlist_add_head_rcu(&vport->dp_hash_node, head);
}
return vport;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 60 | 83.33% | 1 | 50.00% |
Pravin B Shelar | 12 | 16.67% | 1 | 50.00% |
Total | 72 | 100.00% | 2 | 100.00% |
void ovs_dp_detach_port(struct vport *p)
{
ASSERT_OVSL();
/* First drop references to device. */
hlist_del_rcu(&p->dp_hash_node);
/* Then destroy it. */
ovs_vport_del(p);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 25 | 89.29% | 1 | 33.33% |
Pravin B Shelar | 3 | 10.71% | 2 | 66.67% |
Total | 28 | 100.00% | 3 | 100.00% |
/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
const struct vport *p = OVS_CB(skb)->input_vport;
struct datapath *dp = p->dp;
struct sw_flow *flow;
struct sw_flow_actions *sf_acts;
struct dp_stats_percpu *stats;
u64 *stats_counter;
u32 n_mask_hit;
stats = this_cpu_ptr(dp->stats_percpu);
/* Look up flow. */
flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
int error;
memset(&upcall, 0, sizeof(upcall));
upcall.cmd = OVS_PACKET_CMD_MISS;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
upcall.mru = OVS_CB(skb)->mru;
error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
if (unlikely(error))
kfree_skb(skb);
else
consume_skb(skb);
stats_counter = &stats->n_missed;
goto out;
}
ovs_flow_stats_update(flow, key->tp.flags, skb);
sf_acts = rcu_dereference(flow->sf_acts);
ovs_execute_actions(dp, skb, sf_acts, key);
stats_counter = &stats->n_hit;
out:
/* Update datapath statistics. */
u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
u64_stats_update_end(&stats->syncp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 137 | 53.31% | 1 | 5.26% |
Pravin B Shelar | 36 | 14.01% | 6 | 31.58% |
Lorand Jakab | 16 | 6.23% | 1 | 5.26% |
Li RongQing | 15 | 5.84% | 1 | 5.26% |
Andy Zhou | 13 | 5.06% | 2 | 10.53% |
Neil McKee | 13 | 5.06% | 1 | 5.26% |
Joe Stringer | 11 | 4.28% | 1 | 5.26% |
Ben Pfaff | 5 | 1.95% | 1 | 5.26% |
Alex Wang | 5 | 1.95% | 1 | 5.26% |
Américo Wang | 2 | 0.78% | 1 | 5.26% |
William Tu | 2 | 0.78% | 1 | 5.26% |
Eric W. Biedermann | 1 | 0.39% | 1 | 5.26% |
Shan Wei | 1 | 0.39% | 1 | 5.26% |
Total | 257 | 100.00% | 19 | 100.00% |
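The tail of ovs_dp_process_packet() is the writer half of the u64_stats seqcount protocol. For reference, a hedged sketch of the matching reader side (the real reader is get_dp_stats() at the end of this file; the helper below is hypothetical):
/* Illustrative sketch only: consistent snapshot of one CPU's counters. */
static u64 example_read_n_hit(const struct dp_stats_percpu *percpu_stats)
{
	unsigned int start;
	u64 n_hit;

	do {
		start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
		n_hit = percpu_stats->n_hit;
	} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

	return n_hit;
}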
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
struct dp_stats_percpu *stats;
int err;
if (upcall_info->portid == 0) {
err = -ENOTCONN;
goto err;
}
if (!skb_is_gso(skb))
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
else
err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
if (err)
goto err;
return 0;
err:
stats = this_cpu_ptr(dp->stats_percpu);
u64_stats_update_begin(&stats->syncp);
stats->n_lost++;
u64_stats_update_end(&stats->syncp);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 117 | 83.57% | 1 | 14.29% |
Pravin B Shelar | 12 | 8.57% | 2 | 28.57% |
William Tu | 7 | 5.00% | 1 | 14.29% |
Américo Wang | 2 | 1.43% | 1 | 14.29% |
Eric W. Biedermann | 1 | 0.71% | 1 | 14.29% |
Shan Wei | 1 | 0.71% | 1 | 14.29% |
Total | 140 | 100.00% | 7 | 100.00% |
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
unsigned short gso_type = skb_shinfo(skb)->gso_type;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
segs = __skb_gso_segment(skb, NETIF_F_SG, false);
if (IS_ERR(segs))
return PTR_ERR(segs);
if (segs == NULL)
return -EINVAL;
if (gso_type & SKB_GSO_UDP) {
/* The initial flow key extracted by ovs_flow_key_extract()
* in this case is for a first fragment, so we need to
* properly mark later fragments.
*/
later_key = *key;
later_key.ip.frag = OVS_FRAG_TYPE_LATER;
}
/* Queue all of the segments. */
skb = segs;
do {
if (gso_type & SKB_GSO_UDP && skb != segs)
key = &later_key;
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
if (err)
break;
} while ((skb = skb->next));
/* Free all of the segments. */
skb = segs;
do {
nskb = skb->next;
if (err)
kfree_skb(skb);
else
consume_skb(skb);
} while ((skb = nskb));
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 133 | 59.91% | 1 | 10.00% |
Pravin B Shelar | 50 | 22.52% | 3 | 30.00% |
Ben Pfaff | 11 | 4.95% | 1 | 10.00% |
Florian Westphal | 10 | 4.50% | 1 | 10.00% |
Konstantin Khlebnikov | 8 | 3.60% | 1 | 10.00% |
William Tu | 5 | 2.25% | 1 | 10.00% |
Américo Wang | 3 | 1.35% | 1 | 10.00% |
Thomas Graf | 2 | 0.90% | 1 | 10.00% |
Total | 222 | 100.00% | 10 | 100.00% |
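Two details worth spelling out, both recoverable from the code above: __skb_gso_segment() returns the head of a singly linked segment list chained through skb->next, and the function walks that list twice so every segment is freed (on error) or consumed (on success) even when queueing fails partway through.
/* Segment list shape after __skb_gso_segment() (sketch):
 *
 *   segs -> seg1 -> seg2 -> ... -> NULL   (chained via skb->next)
 *
 * Loop 1 queues each segment to userspace; loop 2 releases every
 * segment regardless of where loop 1 stopped, so nothing leaks.
 */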
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
unsigned int hdrlen)
{
size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
+ nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
+ nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
/* OVS_PACKET_ATTR_USERDATA */
if (upcall_info->userdata)
size += NLA_ALIGN(upcall_info->userdata->nla_len);
/* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
if (upcall_info->egress_tun_info)
size += nla_total_size(ovs_tun_key_attr_size());
/* OVS_PACKET_ATTR_ACTIONS */
if (upcall_info->actions_len)
size += nla_total_size(upcall_info->actions_len);
/* OVS_PACKET_ATTR_MRU */
if (upcall_info->mru)
size += nla_total_size(sizeof(upcall_info->mru));
return size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 54 | 44.26% | 2 | 28.57% |
Wenyu Zhang | 21 | 17.21% | 1 | 14.29% |
Joe Stringer | 20 | 16.39% | 2 | 28.57% |
Neil McKee | 16 | 13.11% | 1 | 14.29% |
William Tu | 11 | 9.02% | 1 | 14.29% |
Total | 122 | 100.00% | 7 | 100.00% |
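The arithmetic above relies on nla_total_size(n) == NLA_ALIGN(NLA_HDRLEN + n), i.e. attribute header plus padded payload. A worked example, assuming NLA_HDRLEN == 4 and 4-byte alignment as on Linux:
/* Worked example: hdrlen == 128 and no optional attributes.
 *   NLMSG_ALIGN(sizeof(struct ovs_header)) ->   4 (one int, dp_ifindex)
 *   nla_total_size(128)                     -> 132 (OVS_PACKET_ATTR_PACKET)
 *   nla_total_size(sizeof(unsigned int))    ->   8 (OVS_PACKET_ATTR_LEN)
 * plus nla_total_size(ovs_key_attr_size()) for OVS_PACKET_ATTR_KEY.
 */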
static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
size_t plen = NLA_ALIGN(skb->len) - skb->len;
if (plen > 0)
memset(skb_put(skb, plen), 0, plen);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Joe Stringer | 63 | 100.00% | 1 | 100.00% |
Total | 63 | 100.00% | 1 | 100.00% |
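pad_packet() only matters for user space that cannot handle unaligned attributes: NLA_ALIGN rounds up to a multiple of 4 and the gap is zero-filled. A worked example:
/* Worked example: skb->len == 61 -> NLA_ALIGN(61) == 64 -> plen == 3,
 * so three zero bytes are appended. skb->len == 64 -> plen == 0 (no-op).
 */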
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
struct ovs_header *upcall;
struct sk_buff *nskb = NULL;
struct sk_buff *user_skb = NULL; /* to be queued to userspace */
struct nlattr *nla;
size_t len;
unsigned int hlen;
int err, dp_ifindex;
dp_ifindex = get_dpifindex(dp);
if (!dp_ifindex)
return -ENODEV;
if (skb_vlan_tag_present(skb)) {
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
nskb = __vlan_hwaccel_push_inside(nskb);
if (!nskb)
return -ENOMEM;
skb = nskb;
}
if (nla_attr_size(skb->len) > USHRT_MAX) {
err = -EFBIG;
goto out;
}
/* Complete checksum if needed */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
goto out;
/* Older versions of OVS user space enforce alignment of the last
* Netlink attribute to NLA_ALIGNTO which would require extensive
* padding logic. Only perform zerocopy if padding is not required.
*/
if (dp->user_features & OVS_DP_F_UNALIGNED)
hlen = skb_zerocopy_headlen(skb);
else
hlen = skb->len;
len = upcall_msg_size(upcall_info, hlen - cutlen);
user_skb = genlmsg_new(len, GFP_ATOMIC);
if (!user_skb) {
err = -ENOMEM;
goto out;
}
upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
0, upcall_info->cmd);
upcall->dp_ifindex = dp_ifindex;
err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
BUG_ON(err);
if (upcall_info->userdata)
__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
nla_len(upcall_info->userdata),
nla_data(upcall_info->userdata));
if (upcall_info->egress_tun_info) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
err = ovs_nla_put_tunnel_info(user_skb,
upcall_info->egress_tun_info);
BUG_ON(err);
nla_nest_end(user_skb, nla);
}
if (upcall_info->actions_len) {
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
err = ovs_nla_put_actions(upcall_info->actions,
upcall_info->actions_len,
user_skb);
if (!err)
nla_nest_end(user_skb, nla);
else
nla_nest_cancel(user_skb, nla);
}
/* Add OVS_PACKET_ATTR_MRU */
if (upcall_info->mru) {
if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
upcall_info->mru)) {
err = -ENOBUFS;
goto out;
}
pad_packet(dp, user_skb);
}
/* Add OVS_PACKET_ATTR_LEN when packet is truncated */
if (cutlen > 0) {
if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
skb->len)) {
err = -ENOBUFS;
goto out;
}
pad_packet(dp, user_skb);
}
/* Only reserve room for the attribute header; packet data is added
 * in skb_zerocopy(). */
if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
err = -ENOBUFS;
goto out;
}
nla->nla_len = nla_attr_size(skb->len - cutlen);
err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
if (err)
goto out;
/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
pad_packet(dp, user_skb);
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
user_skb = NULL;
out:
if (err)
skb_tx_error(skb);
kfree_skb(user_skb);
kfree_skb(nskb);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesse Gross | 218 | 35.91% | 1 | 3.85% |
Thomas Graf | 139 | 22.90% | 5 | 19.23% |
Neil McKee | 52 | 8.57% | 1 | 3.85% |
William Tu | 48 | 7.91% | 2 | 7.69% |
Joe Stringer | 47 | 7.74% | 2 | 7.69% |
Wenyu Zhang | 39 | 6.43% | 1 | 3.85% |
Zoltan Kiss | 18 | 2.97% | 1 | 3.85% |
Li RongQing | 11 | 1.81% | 1 | 3.85% |
Ben Pfaff | 10 | 1.65% | 1 | 3.85% |
Pravin B Shelar | 9 | 1.48% | 3 | 11.54% |
Andy Zhou | 9 | 1.48% | 2 | 7.69% |
Rich Lane | 2 | 0.33% | 1 | 3.85% |
Jiri Pirko | 2 | 0.33% | 2 | 7.69% |
Eric W. Biedermann | 1 | 0.16% | 1 | 3.85% |
Dan Carpenter | 1 | 0.16% | 1 | 3.85% |
Florian Westphal | 1 | 0.16% | 1 | 3.85% |
Total | 607 | 100.00% | 26 | 100.00% |
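Pulling the steps above together, the unicast message handed to userspace has this layout (a sketch derived from the attribute order in the function; optional parts appear only when set):
/* Upcall message layout (sketch):
 *   struct nlmsghdr
 *   struct genlmsghdr               (cmd = upcall_info->cmd)
 *   struct ovs_header               (dp_ifindex)
 *   OVS_PACKET_ATTR_KEY             (nested flow key)
 *   OVS_PACKET_ATTR_USERDATA        (optional)
 *   OVS_PACKET_ATTR_EGRESS_TUN_KEY  (optional, nested)
 *   OVS_PACKET_ATTR_ACTIONS         (optional, nested)
 *   OVS_PACKET_ATTR_MRU             (optional)
 *   OVS_PACKET_ATTR_LEN             (only when cutlen > 0)
 *   OVS_PACKET_ATTR_PACKET          (packet data, zerocopied when possible)
 */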
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
struct ovs_header *ovs_header = info->userhdr;
struct net *net = sock_net(skb->sk);
struct nlattr **a = info->attrs;
struct sw_flow_actions *acts;
struct sk_buff *packet;
struct sw_flow *flow;
struct sw_flow_actions *sf_acts;
struct datapath *dp;
struct vport *input_vport;
u16 mru = 0;
int len;
int err;
bool log = !a[OVS_PACKET_ATTR_PROBE];
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
!a[OVS_PACKET_ATTR_ACTIONS])
goto err;
len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
err = -ENOMEM;
if (!packet)
goto err;
skb_reserve(packet, NET_IP_ALIGN);
nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
/* Set packet's mru */
if (a[OVS_PACKET_ATTR_MRU]) {
mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
packet->ignore_df = 1;
}
OVS_CB(packet)->mru = mru;
/* Build an sw_flow for sending this packet. */
flow = ovs_flow_alloc();
err = PTR_ERR(flow);
if (IS_ERR(flow))
goto err_kfree_skb;
err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
packet, &flow->key, log);
if (err)
goto err_flow_free;
err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
&flow->key, &acts, log);
if (err)
goto err_flow_free;
rcu_assign_pointer(flow->sf_acts, acts);
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
rcu_read_lock();
dp = get_dp_rcu(net, ovs_header->dp_ifindex);
err = -ENODEV;
if (!dp)
goto err_unlock;
input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
if (!input_vport)
input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
if (!input_vport)
goto err_unlock;
packet->dev = input_vport->dev;
OVS_CB(packet)->input_vport = input_vport;
sf_acts = rcu_dereference(flow->sf_acts);
local_bh_disable();
err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
local_bh_enable();
rcu_read_unlock();
ovs_flow_free(flow, false);
return err;
err_unlock:
rcu_read_unlock();
err_flow_free:
ovs_flow_free(flow, false);
err_kfree_skb:
kfree_skb(packet);
err:
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pravin B Shelar | 378 | 78.42% | 5 | 35.71% |
Joe Stringer | 65 | 13.49% | 2 | 14.29% |
Lorand Jakab | 16 | 3.32% | 1 | 7.14% |
Jarno Rajahalme | 13 | 2.70% | 2 | 14.29% |
Jesse Gross | 7 | 1.45% | 1 | 7.14% |
Andy Zhou | 2 | 0.41% | 2 | 14.29% |
Thomas Graf | 1 | 0.21% | 1 | 7.14% |
Total | 482 | 100.00% | 14 | 100.00% |
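Summarizing the validation at the top of ovs_packet_cmd_execute() (and the packet_policy table below): a request must carry the packet, its flow key, and the actions to run; the probe and MRU attributes are optional.
/* Minimum OVS_PACKET_CMD_EXECUTE request (sketch):
 *   OVS_PACKET_ATTR_PACKET  - raw frame, at least ETH_HLEN bytes
 *   OVS_PACKET_ATTR_KEY     - nested flow key describing the frame
 *   OVS_PACKET_ATTR_ACTIONS - nested actions to execute
 * Optional: OVS_PACKET_ATTR_PROBE (suppresses error logging),
 *           OVS_PACKET_ATTR_MRU (u16, sets packet->ignore_df).
 */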
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
[OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
};
static const struct genl_ops dp_packet_genl_ops[] = {
{ .cmd = OVS_PACKET_CMD_EXECUTE,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
.policy = packet_policy,
.doit = ovs_packet_cmd_execute
}
};
static struct genl_family dp_packet_genl_family __ro_after_init = {
.hdrsize = sizeof(struct ovs_header),
.name = OVS_PACKET_FAMILY,
.version = OVS_PACKET_VERSION,
.maxattr = OVS_PACKET_ATTR_MAX,
.netnsok = true,
.parallel_ops = true,
.ops = dp_packet_genl_ops,
.n_ops = ARRAY_SIZE(dp_packet_genl_ops),
.module = THIS_MODULE,
};
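The family is declared here but registered during module init: dp_register_genl(), later in this file, loops over the families and calls genl_register_family() on each. parallel_ops lets handlers run concurrently, which is why they rely on ovs_lock() and RCU rather than the generic netlink mutex.
/* From dp_register_genl(), later in this file (sketch):
 *   err = genl_register_family(dp_genl_families[i]);
 */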
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
struct ovs_dp_megaflow_stats *mega_stats)
{
int i;
memset(mega_stats, 0, sizeof(*mega_stats));
stats->n_flows = ovs_flow_tbl_count(&dp->table);
mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
for_each_possible_cpu(i) {
const struct dp_stats_percpu *percpu_stats;
struct dp_stats_percpu local_stats;
unsigned int start;
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
do {
start = u64_stats_fetch_begin_irq(&percpu_stats