GNU Linux-libre 4.9.292-gnu1
net/openvswitch/datapath.c
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "vport-internal_dev.h"
59 #include "vport-netdev.h"
60
61 int ovs_net_id __read_mostly;
62 EXPORT_SYMBOL_GPL(ovs_net_id);
63
64 static struct genl_family dp_packet_genl_family;
65 static struct genl_family dp_flow_genl_family;
66 static struct genl_family dp_datapath_genl_family;
67
68 static const struct nla_policy flow_policy[];
69
70 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
71         .name = OVS_FLOW_MCGROUP,
72 };
73
74 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
75         .name = OVS_DATAPATH_MCGROUP,
76 };
77
78 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
79         .name = OVS_VPORT_MCGROUP,
80 };
81
82 /* Check whether we need to build a reply message.
83  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
84 static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
85                             unsigned int group)
86 {
87         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
88                genl_has_listeners(family, genl_info_net(info), group);
89 }
90
91 static void ovs_notify(struct genl_family *family,
92                        struct sk_buff *skb, struct genl_info *info)
93 {
94         genl_notify(family, skb, info, 0, GFP_KERNEL);
95 }
96
97 /**
98  * DOC: Locking:
99  *
100  * All writes to device state (add/remove datapath or port, set
101  * operations on vports, etc.) and writes to other state (flow table
102  * modifications, setting miscellaneous datapath parameters, etc.) are
103  * protected by ovs_lock.
104  *
105  * Reads are protected by RCU.
106  *
107  * There are a few special cases (mostly stats) that have their own
108  * synchronization but they nest under all of above and don't interact with
109  * each other.
110  *
111  * The RTNL lock nests inside ovs_mutex.
112  */
113
114 static DEFINE_MUTEX(ovs_mutex);
115
116 void ovs_lock(void)
117 {
118         mutex_lock(&ovs_mutex);
119 }
120
121 void ovs_unlock(void)
122 {
123         mutex_unlock(&ovs_mutex);
124 }
125
126 #ifdef CONFIG_LOCKDEP
127 int lockdep_ovsl_is_held(void)
128 {
129         if (debug_locks)
130                 return lockdep_is_held(&ovs_mutex);
131         else
132                 return 1;
133 }
134 EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
135 #endif
136
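/* A minimal sketch (not part of the upstream file) of the locking pattern
 * described in the DOC comment above: writers take ovs_lock(), readers rely
 * on RCU.  The helper names are illustrative only and the #if 0 keeps the
 * sketch out of the build.
 */
#if 0
static int example_write_side(struct datapath *dp, struct sw_flow *flow,
                              struct sw_flow_mask *mask)
{
        int err;

        ovs_lock();                     /* serializes all datapath writes */
        err = ovs_flow_tbl_insert(&dp->table, flow, mask);
        ovs_unlock();

        return err;
}

static int example_read_side(struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        int ifindex = 0;

        rcu_read_lock();                /* readers only need RCU */
        vport = ovs_lookup_vport(dp, port_no);
        if (vport)
                ifindex = vport->dev->ifindex;
        rcu_read_unlock();

        return ifindex;
}
#endif
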
137 static struct vport *new_vport(const struct vport_parms *);
138 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
139                              const struct sw_flow_key *,
140                              const struct dp_upcall_info *,
141                              uint32_t cutlen);
142 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
143                                   const struct sw_flow_key *,
144                                   const struct dp_upcall_info *,
145                                   uint32_t cutlen);
146
147 /* Must be called with rcu_read_lock. */
148 static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
149 {
150         struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
151
152         if (dev) {
153                 struct vport *vport = ovs_internal_dev_get_vport(dev);
154                 if (vport)
155                         return vport->dp;
156         }
157
158         return NULL;
159 }
160
161 /* The caller must hold either ovs_mutex or rcu_read_lock to keep the
162  * returned dp pointer valid.
163  */
164 static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
165 {
166         struct datapath *dp;
167
168         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
169         rcu_read_lock();
170         dp = get_dp_rcu(net, dp_ifindex);
171         rcu_read_unlock();
172
173         return dp;
174 }
175
176 /* Must be called with rcu_read_lock or ovs_mutex. */
177 const char *ovs_dp_name(const struct datapath *dp)
178 {
179         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
180         return ovs_vport_name(vport);
181 }
182
183 static int get_dpifindex(const struct datapath *dp)
184 {
185         struct vport *local;
186         int ifindex;
187
188         rcu_read_lock();
189
190         local = ovs_vport_rcu(dp, OVSP_LOCAL);
191         if (local)
192                 ifindex = local->dev->ifindex;
193         else
194                 ifindex = 0;
195
196         rcu_read_unlock();
197
198         return ifindex;
199 }
200
201 static void destroy_dp_rcu(struct rcu_head *rcu)
202 {
203         struct datapath *dp = container_of(rcu, struct datapath, rcu);
204
205         ovs_flow_tbl_destroy(&dp->table);
206         free_percpu(dp->stats_percpu);
207         kfree(dp->ports);
208         kfree(dp);
209 }
210
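/* DP_VPORT_HASH_BUCKETS (1024 in datapath.h) is a power of two, so the mask
 * below is equivalent to port_no % DP_VPORT_HASH_BUCKETS.
 */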
211 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
212                                             u16 port_no)
213 {
214         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
215 }
216
217 /* Called with ovs_mutex or RCU read lock. */
218 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
219 {
220         struct vport *vport;
221         struct hlist_head *head;
222
223         head = vport_hash_bucket(dp, port_no);
224         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
225                 if (vport->port_no == port_no)
226                         return vport;
227         }
228         return NULL;
229 }
230
231 /* Called with ovs_mutex. */
232 static struct vport *new_vport(const struct vport_parms *parms)
233 {
234         struct vport *vport;
235
236         vport = ovs_vport_add(parms);
237         if (!IS_ERR(vport)) {
238                 struct datapath *dp = parms->dp;
239                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
240
241                 hlist_add_head_rcu(&vport->dp_hash_node, head);
242         }
243         return vport;
244 }
245
246 void ovs_dp_detach_port(struct vport *p)
247 {
248         ASSERT_OVSL();
249
250         /* First drop references to device. */
251         hlist_del_rcu(&p->dp_hash_node);
252
253         /* Then destroy it. */
254         ovs_vport_del(p);
255 }
256
257 /* Must be called with rcu_read_lock. */
258 void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
259 {
260         const struct vport *p = OVS_CB(skb)->input_vport;
261         struct datapath *dp = p->dp;
262         struct sw_flow *flow;
263         struct sw_flow_actions *sf_acts;
264         struct dp_stats_percpu *stats;
265         u64 *stats_counter;
266         u32 n_mask_hit;
267
268         stats = this_cpu_ptr(dp->stats_percpu);
269
270         /* Look up flow. */
271         flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
272         if (unlikely(!flow)) {
273                 struct dp_upcall_info upcall;
274                 int error;
275
276                 memset(&upcall, 0, sizeof(upcall));
277                 upcall.cmd = OVS_PACKET_CMD_MISS;
278                 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
279                 upcall.mru = OVS_CB(skb)->mru;
280                 error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
281                 if (unlikely(error))
282                         kfree_skb(skb);
283                 else
284                         consume_skb(skb);
285                 stats_counter = &stats->n_missed;
286                 goto out;
287         }
288
289         ovs_flow_stats_update(flow, key->tp.flags, skb);
290         sf_acts = rcu_dereference(flow->sf_acts);
291         ovs_execute_actions(dp, skb, sf_acts, key);
292
293         stats_counter = &stats->n_hit;
294
295 out:
296         /* Update datapath statistics. */
297         u64_stats_update_begin(&stats->syncp);
298         (*stats_counter)++;
299         stats->n_mask_hit += n_mask_hit;
300         u64_stats_update_end(&stats->syncp);
301 }
302
303 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
304                   const struct sw_flow_key *key,
305                   const struct dp_upcall_info *upcall_info,
306                   uint32_t cutlen)
307 {
308         struct dp_stats_percpu *stats;
309         int err;
310
311         if (upcall_info->portid == 0) {
312                 err = -ENOTCONN;
313                 goto err;
314         }
315
316         if (!skb_is_gso(skb))
317                 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
318         else
319                 err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
320         if (err)
321                 goto err;
322
323         return 0;
324
325 err:
326         stats = this_cpu_ptr(dp->stats_percpu);
327
328         u64_stats_update_begin(&stats->syncp);
329         stats->n_lost++;
330         u64_stats_update_end(&stats->syncp);
331
332         return err;
333 }
334
335 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
336                              const struct sw_flow_key *key,
337                              const struct dp_upcall_info *upcall_info,
338                              uint32_t cutlen)
339 {
340         unsigned short gso_type = skb_shinfo(skb)->gso_type;
341         struct sw_flow_key later_key;
342         struct sk_buff *segs, *nskb;
343         int err;
344
345         BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
346         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
347         if (IS_ERR(segs))
348                 return PTR_ERR(segs);
349         if (segs == NULL)
350                 return -EINVAL;
351
352         if (gso_type & SKB_GSO_UDP) {
353                 /* The initial flow key extracted by ovs_flow_key_extract()
354                  * in this case is for a first fragment, so we need to
355                  * properly mark later fragments.
356                  */
357                 later_key = *key;
358                 later_key.ip.frag = OVS_FRAG_TYPE_LATER;
359         }
360
361         /* Queue all of the segments. */
362         skb = segs;
363         do {
364                 if (gso_type & SKB_GSO_UDP && skb != segs)
365                         key = &later_key;
366
367                 err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
368                 if (err)
369                         break;
370
371         } while ((skb = skb->next));
372
373         /* Free all of the segments. */
374         skb = segs;
375         do {
376                 nskb = skb->next;
377                 if (err)
378                         kfree_skb(skb);
379                 else
380                         consume_skb(skb);
381         } while ((skb = nskb));
382         return err;
383 }
384
385 static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
386                               unsigned int hdrlen, int actions_attrlen)
387 {
388         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
389                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
390                 + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
391                 + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */
392
393         /* OVS_PACKET_ATTR_USERDATA */
394         if (upcall_info->userdata)
395                 size += NLA_ALIGN(upcall_info->userdata->nla_len);
396
397         /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
398         if (upcall_info->egress_tun_info)
399                 size += nla_total_size(ovs_tun_key_attr_size());
400
401         /* OVS_PACKET_ATTR_ACTIONS */
402         if (upcall_info->actions_len)
403                 size += nla_total_size(actions_attrlen);
404
405         /* OVS_PACKET_ATTR_MRU */
406         if (upcall_info->mru)
407                 size += nla_total_size(sizeof(upcall_info->mru));
408
409         return size;
410 }
411
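/* NLA_ALIGNTO is 4, so e.g. a 61-byte message receives 3 bytes of zero
 * padding to reach NLA_ALIGN(61) == 64.  Userspace that announces
 * OVS_DP_F_UNALIGNED accepts unpadded messages, making this a no-op.
 */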
412 static void pad_packet(struct datapath *dp, struct sk_buff *skb)
413 {
414         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
415                 size_t plen = NLA_ALIGN(skb->len) - skb->len;
416
417                 if (plen > 0)
418                         memset(skb_put(skb, plen), 0, plen);
419         }
420 }
421
422 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
423                                   const struct sw_flow_key *key,
424                                   const struct dp_upcall_info *upcall_info,
425                                   uint32_t cutlen)
426 {
427         struct ovs_header *upcall;
428         struct sk_buff *nskb = NULL;
429         struct sk_buff *user_skb = NULL; /* to be queued to userspace */
430         struct nlattr *nla;
431         size_t len;
432         unsigned int hlen;
433         int err, dp_ifindex;
434
435         dp_ifindex = get_dpifindex(dp);
436         if (!dp_ifindex)
437                 return -ENODEV;
438
439         if (skb_vlan_tag_present(skb)) {
440                 nskb = skb_clone(skb, GFP_ATOMIC);
441                 if (!nskb)
442                         return -ENOMEM;
443
444                 nskb = __vlan_hwaccel_push_inside(nskb);
445                 if (!nskb)
446                         return -ENOMEM;
447
448                 skb = nskb;
449         }
450
451         if (nla_attr_size(skb->len) > USHRT_MAX) {
452                 err = -EFBIG;
453                 goto out;
454         }
455
456         /* Complete checksum if needed */
457         if (skb->ip_summed == CHECKSUM_PARTIAL &&
458             (err = skb_checksum_help(skb)))
459                 goto out;
460
461         /* Older versions of OVS user space enforce alignment of the last
462          * Netlink attribute to NLA_ALIGNTO which would require extensive
463          * padding logic. Only perform zerocopy if padding is not required.
464          */
465         if (dp->user_features & OVS_DP_F_UNALIGNED)
466                 hlen = skb_zerocopy_headlen(skb);
467         else
468                 hlen = skb->len;
469
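        /* hlen is the number of bytes copied linearly into the upcall
         * message; skb_zerocopy() below attaches the rest of the packet as
         * page fragments when zerocopy is permitted.
         */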
470         len = upcall_msg_size(upcall_info, hlen - cutlen,
471                               OVS_CB(skb)->acts_origlen);
472         user_skb = genlmsg_new(len, GFP_ATOMIC);
473         if (!user_skb) {
474                 err = -ENOMEM;
475                 goto out;
476         }
477
478         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
479                              0, upcall_info->cmd);
480         upcall->dp_ifindex = dp_ifindex;
481
482         err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
483         BUG_ON(err);
484
485         if (upcall_info->userdata)
486                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
487                           nla_len(upcall_info->userdata),
488                           nla_data(upcall_info->userdata));
489
490         if (upcall_info->egress_tun_info) {
491                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
492                 err = ovs_nla_put_tunnel_info(user_skb,
493                                               upcall_info->egress_tun_info);
494                 BUG_ON(err);
495                 nla_nest_end(user_skb, nla);
496         }
497
498         if (upcall_info->actions_len) {
499                 nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
500                 err = ovs_nla_put_actions(upcall_info->actions,
501                                           upcall_info->actions_len,
502                                           user_skb);
503                 if (!err)
504                         nla_nest_end(user_skb, nla);
505                 else
506                         nla_nest_cancel(user_skb, nla);
507         }
508
509         /* Add OVS_PACKET_ATTR_MRU */
510         if (upcall_info->mru) {
511                 if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
512                                 upcall_info->mru)) {
513                         err = -ENOBUFS;
514                         goto out;
515                 }
516                 pad_packet(dp, user_skb);
517         }
518
519         /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
520         if (cutlen > 0) {
521                 if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
522                                 skb->len)) {
523                         err = -ENOBUFS;
524                         goto out;
525                 }
526                 pad_packet(dp, user_skb);
527         }
528
529         /* Only reserve room for the attribute header; packet data is
530          * added in skb_zerocopy(). */
531         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
532                 err = -ENOBUFS;
533                 goto out;
534         }
535         nla->nla_len = nla_attr_size(skb->len - cutlen);
536
537         err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
538         if (err)
539                 goto out;
540
541         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
542         pad_packet(dp, user_skb);
543
544         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
545
546         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
547         user_skb = NULL;
548 out:
549         if (err)
550                 skb_tx_error(skb);
551         kfree_skb(user_skb);
552         kfree_skb(nskb);
553         return err;
554 }
555
556 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
557 {
558         struct ovs_header *ovs_header = info->userhdr;
559         struct net *net = sock_net(skb->sk);
560         struct nlattr **a = info->attrs;
561         struct sw_flow_actions *acts;
562         struct sk_buff *packet;
563         struct sw_flow *flow;
564         struct sw_flow_actions *sf_acts;
565         struct datapath *dp;
566         struct ethhdr *eth;
567         struct vport *input_vport;
568         u16 mru = 0;
569         int len;
570         int err;
571         bool log = !a[OVS_PACKET_ATTR_PROBE];
572
573         err = -EINVAL;
574         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
575             !a[OVS_PACKET_ATTR_ACTIONS])
576                 goto err;
577
578         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
579         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
580         err = -ENOMEM;
581         if (!packet)
582                 goto err;
583         skb_reserve(packet, NET_IP_ALIGN);
584
585         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
586
587         skb_reset_mac_header(packet);
588         eth = eth_hdr(packet);
589
590         /* Normally, setting the skb 'protocol' field would be handled by a
591          * call to eth_type_trans(), but it assumes there's a sending
592          * device, which we may not have. */
593         if (eth_proto_is_802_3(eth->h_proto))
594                 packet->protocol = eth->h_proto;
595         else
596                 packet->protocol = htons(ETH_P_802_2);
597
598         /* Set packet's mru */
599         if (a[OVS_PACKET_ATTR_MRU]) {
600                 mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
601                 packet->ignore_df = 1;
602         }
603         OVS_CB(packet)->mru = mru;
604
605         /* Build an sw_flow for sending this packet. */
606         flow = ovs_flow_alloc();
607         err = PTR_ERR(flow);
608         if (IS_ERR(flow))
609                 goto err_kfree_skb;
610
611         err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
612                                              packet, &flow->key, log);
613         if (err)
614                 goto err_flow_free;
615
616         err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
617                                    &flow->key, &acts, log);
618         if (err)
619                 goto err_flow_free;
620
621         rcu_assign_pointer(flow->sf_acts, acts);
622         packet->priority = flow->key.phy.priority;
623         packet->mark = flow->key.phy.skb_mark;
624
625         rcu_read_lock();
626         dp = get_dp_rcu(net, ovs_header->dp_ifindex);
627         err = -ENODEV;
628         if (!dp)
629                 goto err_unlock;
630
631         input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
632         if (!input_vport)
633                 input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
634
635         if (!input_vport)
636                 goto err_unlock;
637
638         packet->dev = input_vport->dev;
639         OVS_CB(packet)->input_vport = input_vport;
640         sf_acts = rcu_dereference(flow->sf_acts);
641
642         local_bh_disable();
643         err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
644         local_bh_enable();
645         rcu_read_unlock();
646
647         ovs_flow_free(flow, false);
648         return err;
649
650 err_unlock:
651         rcu_read_unlock();
652 err_flow_free:
653         ovs_flow_free(flow, false);
654 err_kfree_skb:
655         kfree_skb(packet);
656 err:
657         return err;
658 }
659
660 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
661         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
662         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
663         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
664         [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
665         [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
666 };
667
668 static const struct genl_ops dp_packet_genl_ops[] = {
669         { .cmd = OVS_PACKET_CMD_EXECUTE,
670           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
671           .policy = packet_policy,
672           .doit = ovs_packet_cmd_execute
673         }
674 };
675
676 static struct genl_family dp_packet_genl_family = {
677         .id = GENL_ID_GENERATE,
678         .hdrsize = sizeof(struct ovs_header),
679         .name = OVS_PACKET_FAMILY,
680         .version = OVS_PACKET_VERSION,
681         .maxattr = OVS_PACKET_ATTR_MAX,
682         .netnsok = true,
683         .parallel_ops = true,
684         .ops = dp_packet_genl_ops,
685         .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
686 };
687
688 static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
689                          struct ovs_dp_megaflow_stats *mega_stats)
690 {
691         int i;
692
693         memset(mega_stats, 0, sizeof(*mega_stats));
694
695         stats->n_flows = ovs_flow_tbl_count(&dp->table);
696         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
697
698         stats->n_hit = stats->n_missed = stats->n_lost = 0;
699
700         for_each_possible_cpu(i) {
701                 const struct dp_stats_percpu *percpu_stats;
702                 struct dp_stats_percpu local_stats;
703                 unsigned int start;
704
705                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
706
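                /* The u64_stats seqcount retry loop yields a consistent
                 * snapshot of the 64-bit counters on 32-bit SMP kernels; on
                 * 64-bit builds the begin/retry pair is effectively a no-op.
                 */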
707                 do {
708                         start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
709                         local_stats = *percpu_stats;
710                 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
711
712                 stats->n_hit += local_stats.n_hit;
713                 stats->n_missed += local_stats.n_missed;
714                 stats->n_lost += local_stats.n_lost;
715                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
716         }
717 }
718
719 static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
720 {
721         return ovs_identifier_is_ufid(sfid) &&
722                !(ufid_flags & OVS_UFID_F_OMIT_KEY);
723 }
724
725 static bool should_fill_mask(uint32_t ufid_flags)
726 {
727         return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
728 }
729
730 static bool should_fill_actions(uint32_t ufid_flags)
731 {
732         return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
733 }
734
735 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
736                                     const struct sw_flow_id *sfid,
737                                     uint32_t ufid_flags)
738 {
739         size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));
740
741         /* OVS_FLOW_ATTR_UFID, or unmasked flow key as fallback
742          * see ovs_nla_put_identifier()
743          */
744         if (sfid && ovs_identifier_is_ufid(sfid))
745                 len += nla_total_size(sfid->ufid_len);
746         else
747                 len += nla_total_size(ovs_key_attr_size());
748
749         /* OVS_FLOW_ATTR_KEY */
750         if (!sfid || should_fill_key(sfid, ufid_flags))
751                 len += nla_total_size(ovs_key_attr_size());
752
753         /* OVS_FLOW_ATTR_MASK */
754         if (should_fill_mask(ufid_flags))
755                 len += nla_total_size(ovs_key_attr_size());
756
757         /* OVS_FLOW_ATTR_ACTIONS */
758         if (should_fill_actions(ufid_flags))
759                 len += nla_total_size(acts->orig_len);
760
761         return len
762                 + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
763                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
764                 + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
765 }
766
767 /* Called with ovs_mutex or RCU read lock. */
768 static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
769                                    struct sk_buff *skb)
770 {
771         struct ovs_flow_stats stats;
772         __be16 tcp_flags;
773         unsigned long used;
774
775         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
776
777         if (used &&
778             nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
779                               OVS_FLOW_ATTR_PAD))
780                 return -EMSGSIZE;
781
782         if (stats.n_packets &&
783             nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
784                           sizeof(struct ovs_flow_stats), &stats,
785                           OVS_FLOW_ATTR_PAD))
786                 return -EMSGSIZE;
787
788         if ((u8)ntohs(tcp_flags) &&
789              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
790                 return -EMSGSIZE;
791
792         return 0;
793 }
794
795 /* Called with ovs_mutex or RCU read lock. */
796 static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
797                                      struct sk_buff *skb, int skb_orig_len)
798 {
799         struct nlattr *start;
800         int err;
801
802         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
803          * this is the first flow to be dumped into 'skb'.  This is unusual for
804          * Netlink but individual action lists can be longer than
805          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
806          * The userspace caller can always fetch the actions separately if it
807          * really wants them.  (Most userspace callers in fact don't care.)
808          *
809          * This can only fail for dump operations because the skb is always
810          * properly sized for single flows.
811          */
812         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
813         if (start) {
814                 const struct sw_flow_actions *sf_acts;
815
816                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
817                 err = ovs_nla_put_actions(sf_acts->actions,
818                                           sf_acts->actions_len, skb);
819
820                 if (!err)
821                         nla_nest_end(skb, start);
822                 else {
823                         if (skb_orig_len)
824                                 return err;
825
826                         nla_nest_cancel(skb, start);
827                 }
828         } else if (skb_orig_len) {
829                 return -EMSGSIZE;
830         }
831
832         return 0;
833 }
834
835 /* Called with ovs_mutex or RCU read lock. */
836 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
837                                   struct sk_buff *skb, u32 portid,
838                                   u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
839 {
840         const int skb_orig_len = skb->len;
841         struct ovs_header *ovs_header;
842         int err;
843
844         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
845                                  flags, cmd);
846         if (!ovs_header)
847                 return -EMSGSIZE;
848
849         ovs_header->dp_ifindex = dp_ifindex;
850
851         err = ovs_nla_put_identifier(flow, skb);
852         if (err)
853                 goto error;
854
855         if (should_fill_key(&flow->id, ufid_flags)) {
856                 err = ovs_nla_put_masked_key(flow, skb);
857                 if (err)
858                         goto error;
859         }
860
861         if (should_fill_mask(ufid_flags)) {
862                 err = ovs_nla_put_mask(flow, skb);
863                 if (err)
864                         goto error;
865         }
866
867         err = ovs_flow_cmd_fill_stats(flow, skb);
868         if (err)
869                 goto error;
870
871         if (should_fill_actions(ufid_flags)) {
872                 err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
873                 if (err)
874                         goto error;
875         }
876
877         genlmsg_end(skb, ovs_header);
878         return 0;
879
880 error:
881         genlmsg_cancel(skb, ovs_header);
882         return err;
883 }
884
885 /* May not be called with RCU read lock. */
886 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
887                                                const struct sw_flow_id *sfid,
888                                                struct genl_info *info,
889                                                bool always,
890                                                uint32_t ufid_flags)
891 {
892         struct sk_buff *skb;
893         size_t len;
894
895         if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
896                 return NULL;
897
898         len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
899         skb = genlmsg_new(len, GFP_KERNEL);
900         if (!skb)
901                 return ERR_PTR(-ENOMEM);
902
903         return skb;
904 }
905
906 /* Called with ovs_mutex. */
907 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
908                                                int dp_ifindex,
909                                                struct genl_info *info, u8 cmd,
910                                                bool always, u32 ufid_flags)
911 {
912         struct sk_buff *skb;
913         int retval;
914
915         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
916                                       &flow->id, info, always, ufid_flags);
917         if (IS_ERR_OR_NULL(skb))
918                 return skb;
919
920         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
921                                         info->snd_portid, info->snd_seq, 0,
922                                         cmd, ufid_flags);
923         if (WARN_ON_ONCE(retval < 0)) {
924                 kfree_skb(skb);
925                 skb = ERR_PTR(retval);
926         }
927         return skb;
928 }
929
930 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
931 {
932         struct net *net = sock_net(skb->sk);
933         struct nlattr **a = info->attrs;
934         struct ovs_header *ovs_header = info->userhdr;
935         struct sw_flow *flow = NULL, *new_flow;
936         struct sw_flow_mask mask;
937         struct sk_buff *reply;
938         struct datapath *dp;
939         struct sw_flow_actions *acts;
940         struct sw_flow_match match;
941         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
942         int error;
943         bool log = !a[OVS_FLOW_ATTR_PROBE];
944
945         /* Must have key and actions. */
946         error = -EINVAL;
947         if (!a[OVS_FLOW_ATTR_KEY]) {
948                 OVS_NLERR(log, "Flow key attr not present in new flow.");
949                 goto error;
950         }
951         if (!a[OVS_FLOW_ATTR_ACTIONS]) {
952                 OVS_NLERR(log, "Flow actions attr not present in new flow.");
953                 goto error;
954         }
955
956         /* Most of the time we need to allocate a new flow, so do it
957          * before locking.
958          */
959         new_flow = ovs_flow_alloc();
960         if (IS_ERR(new_flow)) {
961                 error = PTR_ERR(new_flow);
962                 goto error;
963         }
964
965         /* Extract key. */
966         ovs_match_init(&match, &new_flow->key, false, &mask);
967         error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
968                                   a[OVS_FLOW_ATTR_MASK], log);
969         if (error)
970                 goto err_kfree_flow;
971
972         /* Extract flow identifier. */
973         error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
974                                        &new_flow->key, log);
975         if (error)
976                 goto err_kfree_flow;
977
978         /* unmasked key is needed to match when ufid is not used. */
979         if (ovs_identifier_is_key(&new_flow->id))
980                 match.key = new_flow->id.unmasked_key;
981
982         ovs_flow_mask_key(&new_flow->key, &new_flow->key, true, &mask);
983
984         /* Validate actions. */
985         error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
986                                      &new_flow->key, &acts, log);
987         if (error) {
988                 OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
989                 goto err_kfree_flow;
990         }
991
992         reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
993                                         ufid_flags);
994         if (IS_ERR(reply)) {
995                 error = PTR_ERR(reply);
996                 goto err_kfree_acts;
997         }
998
999         ovs_lock();
1000         dp = get_dp(net, ovs_header->dp_ifindex);
1001         if (unlikely(!dp)) {
1002                 error = -ENODEV;
1003                 goto err_unlock_ovs;
1004         }
1005
1006         /* Check if this is a duplicate flow */
1007         if (ovs_identifier_is_ufid(&new_flow->id))
1008                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
1009         if (!flow)
1010                 flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->key);
1011         if (likely(!flow)) {
1012                 rcu_assign_pointer(new_flow->sf_acts, acts);
1013
1014                 /* Put flow in bucket. */
1015                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
1016                 if (unlikely(error)) {
1017                         acts = NULL;
1018                         goto err_unlock_ovs;
1019                 }
1020
1021                 if (unlikely(reply)) {
1022                         error = ovs_flow_cmd_fill_info(new_flow,
1023                                                        ovs_header->dp_ifindex,
1024                                                        reply, info->snd_portid,
1025                                                        info->snd_seq, 0,
1026                                                        OVS_FLOW_CMD_NEW,
1027                                                        ufid_flags);
1028                         BUG_ON(error < 0);
1029                 }
1030                 ovs_unlock();
1031         } else {
1032                 struct sw_flow_actions *old_acts;
1033
1034                 /* Bail out if we're not allowed to modify an existing flow.
1035                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
1036                  * because Generic Netlink treats the latter as a dump
1037                  * request.  We also accept NLM_F_EXCL in case that bug ever
1038                  * gets fixed.
1039                  */
1040                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
1041                                                          | NLM_F_EXCL))) {
1042                         error = -EEXIST;
1043                         goto err_unlock_ovs;
1044                 }
1045                 /* The flow identifier has to be the same for flow updates.
1046                  * Look for any overlapping flow.
1047                  */
1048                 if (unlikely(!ovs_flow_cmp(flow, &match))) {
1049                         if (ovs_identifier_is_key(&flow->id))
1050                                 flow = ovs_flow_tbl_lookup_exact(&dp->table,
1051                                                                  &match);
1052                         else /* UFID matches but key is different */
1053                                 flow = NULL;
1054                         if (!flow) {
1055                                 error = -ENOENT;
1056                                 goto err_unlock_ovs;
1057                         }
1058                 }
1059                 /* Update actions. */
1060                 old_acts = ovsl_dereference(flow->sf_acts);
1061                 rcu_assign_pointer(flow->sf_acts, acts);
1062
1063                 if (unlikely(reply)) {
1064                         error = ovs_flow_cmd_fill_info(flow,
1065                                                        ovs_header->dp_ifindex,
1066                                                        reply, info->snd_portid,
1067                                                        info->snd_seq, 0,
1068                                                        OVS_FLOW_CMD_NEW,
1069                                                        ufid_flags);
1070                         BUG_ON(error < 0);
1071                 }
1072                 ovs_unlock();
1073
1074                 ovs_nla_free_flow_actions_rcu(old_acts);
1075                 ovs_flow_free(new_flow, false);
1076         }
1077
1078         if (reply)
1079                 ovs_notify(&dp_flow_genl_family, reply, info);
1080         return 0;
1081
1082 err_unlock_ovs:
1083         ovs_unlock();
1084         kfree_skb(reply);
1085 err_kfree_acts:
1086         ovs_nla_free_flow_actions(acts);
1087 err_kfree_flow:
1088         ovs_flow_free(new_flow, false);
1089 error:
1090         return error;
1091 }
1092
1093 /* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
1094 static struct sw_flow_actions *get_flow_actions(struct net *net,
1095                                                 const struct nlattr *a,
1096                                                 const struct sw_flow_key *key,
1097                                                 const struct sw_flow_mask *mask,
1098                                                 bool log)
1099 {
1100         struct sw_flow_actions *acts;
1101         struct sw_flow_key masked_key;
1102         int error;
1103
1104         ovs_flow_mask_key(&masked_key, key, true, mask);
1105         error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
1106         if (error) {
1107                 OVS_NLERR(log,
1108                           "Actions may not be safe on all matching packets");
1109                 return ERR_PTR(error);
1110         }
1111
1112         return acts;
1113 }
1114
1115 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
1116 {
1117         struct net *net = sock_net(skb->sk);
1118         struct nlattr **a = info->attrs;
1119         struct ovs_header *ovs_header = info->userhdr;
1120         struct sw_flow_key key;
1121         struct sw_flow *flow;
1122         struct sw_flow_mask mask;
1123         struct sk_buff *reply = NULL;
1124         struct datapath *dp;
1125         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
1126         struct sw_flow_match match;
1127         struct sw_flow_id sfid;
1128         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1129         int error = 0;
1130         bool log = !a[OVS_FLOW_ATTR_PROBE];
1131         bool ufid_present;
1132
1133         ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
1134         if (a[OVS_FLOW_ATTR_KEY]) {
1135                 ovs_match_init(&match, &key, true, &mask);
1136                 error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1137                                           a[OVS_FLOW_ATTR_MASK], log);
1138         } else if (!ufid_present) {
1139                 OVS_NLERR(log,
1140                           "Flow set message rejected, Key attribute missing.");
1141                 error = -EINVAL;
1142         }
1143         if (error)
1144                 goto error;
1145
1146         /* Validate actions. */
1147         if (a[OVS_FLOW_ATTR_ACTIONS]) {
1148                 if (!a[OVS_FLOW_ATTR_KEY]) {
1149                         OVS_NLERR(log,
1150                                   "Flow key attribute not present in set flow.");
1151                         error = -EINVAL;
1152                         goto error;
1153                 }
1154
1155                 acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
1156                                         &mask, log);
1157                 if (IS_ERR(acts)) {
1158                         error = PTR_ERR(acts);
1159                         goto error;
1160                 }
1161
1162                 /* Can allocate before locking if we have acts. */
1163                 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
1164                                                 ufid_flags);
1165                 if (IS_ERR(reply)) {
1166                         error = PTR_ERR(reply);
1167                         goto err_kfree_acts;
1168                 }
1169         }
1170
1171         ovs_lock();
1172         dp = get_dp(net, ovs_header->dp_ifindex);
1173         if (unlikely(!dp)) {
1174                 error = -ENODEV;
1175                 goto err_unlock_ovs;
1176         }
1177         /* Check that the flow exists. */
1178         if (ufid_present)
1179                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
1180         else
1181                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1182         if (unlikely(!flow)) {
1183                 error = -ENOENT;
1184                 goto err_unlock_ovs;
1185         }
1186
1187         /* Update actions, if present. */
1188         if (likely(acts)) {
1189                 old_acts = ovsl_dereference(flow->sf_acts);
1190                 rcu_assign_pointer(flow->sf_acts, acts);
1191
1192                 if (unlikely(reply)) {
1193                         error = ovs_flow_cmd_fill_info(flow,
1194                                                        ovs_header->dp_ifindex,
1195                                                        reply, info->snd_portid,
1196                                                        info->snd_seq, 0,
1197                                                        OVS_FLOW_CMD_NEW,
1198                                                        ufid_flags);
1199                         BUG_ON(error < 0);
1200                 }
1201         } else {
1202                 /* Could not alloc without acts before locking. */
1203                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1204                                                 info, OVS_FLOW_CMD_NEW, false,
1205                                                 ufid_flags);
1206
1207                 if (IS_ERR(reply)) {
1208                         error = PTR_ERR(reply);
1209                         goto err_unlock_ovs;
1210                 }
1211         }
1212
1213         /* Clear stats. */
1214         if (a[OVS_FLOW_ATTR_CLEAR])
1215                 ovs_flow_stats_clear(flow);
1216         ovs_unlock();
1217
1218         if (reply)
1219                 ovs_notify(&dp_flow_genl_family, reply, info);
1220         if (old_acts)
1221                 ovs_nla_free_flow_actions_rcu(old_acts);
1222
1223         return 0;
1224
1225 err_unlock_ovs:
1226         ovs_unlock();
1227         kfree_skb(reply);
1228 err_kfree_acts:
1229         ovs_nla_free_flow_actions(acts);
1230 error:
1231         return error;
1232 }
1233
1234 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1235 {
1236         struct nlattr **a = info->attrs;
1237         struct ovs_header *ovs_header = info->userhdr;
1238         struct net *net = sock_net(skb->sk);
1239         struct sw_flow_key key;
1240         struct sk_buff *reply;
1241         struct sw_flow *flow;
1242         struct datapath *dp;
1243         struct sw_flow_match match;
1244         struct sw_flow_id ufid;
1245         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1246         int err = 0;
1247         bool log = !a[OVS_FLOW_ATTR_PROBE];
1248         bool ufid_present;
1249
1250         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1251         if (a[OVS_FLOW_ATTR_KEY]) {
1252                 ovs_match_init(&match, &key, true, NULL);
1253                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
1254                                         log);
1255         } else if (!ufid_present) {
1256                 OVS_NLERR(log,
1257                           "Flow get message rejected, Key attribute missing.");
1258                 err = -EINVAL;
1259         }
1260         if (err)
1261                 return err;
1262
1263         ovs_lock();
1264         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1265         if (!dp) {
1266                 err = -ENODEV;
1267                 goto unlock;
1268         }
1269
1270         if (ufid_present)
1271                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1272         else
1273                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1274         if (!flow) {
1275                 err = -ENOENT;
1276                 goto unlock;
1277         }
1278
1279         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1280                                         OVS_FLOW_CMD_NEW, true, ufid_flags);
1281         if (IS_ERR(reply)) {
1282                 err = PTR_ERR(reply);
1283                 goto unlock;
1284         }
1285
1286         ovs_unlock();
1287         return genlmsg_reply(reply, info);
1288 unlock:
1289         ovs_unlock();
1290         return err;
1291 }
1292
1293 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1294 {
1295         struct nlattr **a = info->attrs;
1296         struct ovs_header *ovs_header = info->userhdr;
1297         struct net *net = sock_net(skb->sk);
1298         struct sw_flow_key key;
1299         struct sk_buff *reply;
1300         struct sw_flow *flow = NULL;
1301         struct datapath *dp;
1302         struct sw_flow_match match;
1303         struct sw_flow_id ufid;
1304         u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1305         int err;
1306         bool log = !a[OVS_FLOW_ATTR_PROBE];
1307         bool ufid_present;
1308
1309         ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
1310         if (a[OVS_FLOW_ATTR_KEY]) {
1311                 ovs_match_init(&match, &key, true, NULL);
1312                 err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
1313                                         NULL, log);
1314                 if (unlikely(err))
1315                         return err;
1316         }
1317
1318         ovs_lock();
1319         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1320         if (unlikely(!dp)) {
1321                 err = -ENODEV;
1322                 goto unlock;
1323         }
1324
1325         if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
1326                 err = ovs_flow_tbl_flush(&dp->table);
1327                 goto unlock;
1328         }
1329
1330         if (ufid_present)
1331                 flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
1332         else
1333                 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1334         if (unlikely(!flow)) {
1335                 err = -ENOENT;
1336                 goto unlock;
1337         }
1338
1339         ovs_flow_tbl_remove(&dp->table, flow);
1340         ovs_unlock();
1341
1342         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1343                                         &flow->id, info, false, ufid_flags);
1344         if (likely(reply)) {
1345                 if (likely(!IS_ERR(reply))) {
1346                         rcu_read_lock();        /* To keep RCU checker happy. */
1347                         err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1348                                                      reply, info->snd_portid,
1349                                                      info->snd_seq, 0,
1350                                                      OVS_FLOW_CMD_DEL,
1351                                                      ufid_flags);
1352                         rcu_read_unlock();
1353                         if (WARN_ON_ONCE(err < 0)) {
1354                                 kfree_skb(reply);
1355                                 goto out_free;
1356                         }
1357
1358                         ovs_notify(&dp_flow_genl_family, reply, info);
1359                 } else {
1360                         netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1361                 }
1362         }
1363
1364 out_free:
1365         ovs_flow_free(flow, true);
1366         return 0;
1367 unlock:
1368         ovs_unlock();
1369         return err;
1370 }
1371
1372 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1373 {
1374         struct nlattr *a[__OVS_FLOW_ATTR_MAX];
1375         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1376         struct table_instance *ti;
1377         struct datapath *dp;
1378         u32 ufid_flags;
1379         int err;
1380
1381         err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
1382                             OVS_FLOW_ATTR_MAX, flow_policy);
1383         if (err)
1384                 return err;
1385         ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
1386
1387         rcu_read_lock();
1388         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1389         if (!dp) {
1390                 rcu_read_unlock();
1391                 return -ENODEV;
1392         }
1393
1394         ti = rcu_dereference(dp->table.ti);
1395         for (;;) {
1396                 struct sw_flow *flow;
1397                 u32 bucket, obj;
1398
1399                 bucket = cb->args[0];
1400                 obj = cb->args[1];
1401                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1402                 if (!flow)
1403                         break;
1404
1405                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1406                                            NETLINK_CB(cb->skb).portid,
1407                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1408                                            OVS_FLOW_CMD_NEW, ufid_flags) < 0)
1409                         break;
1410
1411                 cb->args[0] = bucket;
1412                 cb->args[1] = obj;
1413         }
1414         rcu_read_unlock();
1415         return skb->len;
1416 }
1417
1418 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1419         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1420         [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
1421         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1422         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1423         [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
1424         [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
1425         [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
1426 };
1427
1428 static const struct genl_ops dp_flow_genl_ops[] = {
1429         { .cmd = OVS_FLOW_CMD_NEW,
1430           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1431           .policy = flow_policy,
1432           .doit = ovs_flow_cmd_new
1433         },
1434         { .cmd = OVS_FLOW_CMD_DEL,
1435           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1436           .policy = flow_policy,
1437           .doit = ovs_flow_cmd_del
1438         },
1439         { .cmd = OVS_FLOW_CMD_GET,
1440           .flags = 0,               /* OK for unprivileged users. */
1441           .policy = flow_policy,
1442           .doit = ovs_flow_cmd_get,
1443           .dumpit = ovs_flow_cmd_dump
1444         },
1445         { .cmd = OVS_FLOW_CMD_SET,
1446           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1447           .policy = flow_policy,
1448           .doit = ovs_flow_cmd_set,
1449         },
1450 };
1451
1452 static struct genl_family dp_flow_genl_family = {
1453         .id = GENL_ID_GENERATE,
1454         .hdrsize = sizeof(struct ovs_header),
1455         .name = OVS_FLOW_FAMILY,
1456         .version = OVS_FLOW_VERSION,
1457         .maxattr = OVS_FLOW_ATTR_MAX,
1458         .netnsok = true,
1459         .parallel_ops = true,
1460         .ops = dp_flow_genl_ops,
1461         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1462         .mcgrps = &ovs_dp_flow_multicast_group,
1463         .n_mcgrps = 1,
1464 };
1465
1466 static size_t ovs_dp_cmd_msg_size(void)
1467 {
1468         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1469
1470         msgsize += nla_total_size(IFNAMSIZ);
1471         msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
1472         msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
1473         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1474
1475         return msgsize;
1476 }
1477
1478 /* Called with ovs_mutex. */
1479 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1480                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1481 {
1482         struct ovs_header *ovs_header;
1483         struct ovs_dp_stats dp_stats;
1484         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1485         int err;
1486
1487         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1488                                    flags, cmd);
1489         if (!ovs_header)
1490                 goto error;
1491
1492         ovs_header->dp_ifindex = get_dpifindex(dp);
1493
1494         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1495         if (err)
1496                 goto nla_put_failure;
1497
1498         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1499         if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1500                           &dp_stats, OVS_DP_ATTR_PAD))
1501                 goto nla_put_failure;
1502
1503         if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1504                           sizeof(struct ovs_dp_megaflow_stats),
1505                           &dp_megaflow_stats, OVS_DP_ATTR_PAD))
1506                 goto nla_put_failure;
1507
1508         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1509                 goto nla_put_failure;
1510
1511         genlmsg_end(skb, ovs_header);
1512         return 0;
1513
1514 nla_put_failure:
1515         genlmsg_cancel(skb, ovs_header);
1516 error:
1517         return -EMSGSIZE;
1518 }
1519
1520 static struct sk_buff *ovs_dp_cmd_alloc_info(void)
1521 {
1522         return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
1523 }
1524
1525 /* Called with rcu_read_lock or ovs_mutex. */
1526 static struct datapath *lookup_datapath(struct net *net,
1527                                         const struct ovs_header *ovs_header,
1528                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1529 {
1530         struct datapath *dp;
1531
1532         if (!a[OVS_DP_ATTR_NAME])
1533                 dp = get_dp(net, ovs_header->dp_ifindex);
1534         else {
1535                 struct vport *vport;
1536
1537                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1538                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1539         }
1540         return dp ? dp : ERR_PTR(-ENODEV);
1541 }
1542
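/* Clear any previously announced user features on the datapath named in this
 * request; used when an outdated userspace re-creates an existing datapath.
 */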
1543 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1544 {
1545         struct datapath *dp;
1546
1547         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1548         if (IS_ERR(dp))
1549                 return;
1550
1551         WARN(dp->user_features, "Dropping previously announced user features\n");
1552         dp->user_features = 0;
1553 }
1554
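/* Apply an OVS_DP_ATTR_USER_FEATURES attribute, if present, to the datapath. */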
1555 static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
1556 {
1557         if (a[OVS_DP_ATTR_USER_FEATURES])
1558                 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1559 }
1560
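/* OVS_DP_CMD_NEW: allocate a datapath, its flow table, per-CPU stats and port
 * table, create the local (internal) vport under ovs_lock and notify
 * listeners.
 */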
1561 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1562 {
1563         struct nlattr **a = info->attrs;
1564         struct vport_parms parms;
1565         struct sk_buff *reply;
1566         struct datapath *dp;
1567         struct vport *vport;
1568         struct ovs_net *ovs_net;
1569         int err, i;
1570
1571         err = -EINVAL;
1572         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1573                 goto err;
1574
1575         reply = ovs_dp_cmd_alloc_info();
1576         if (!reply)
1577                 return -ENOMEM;
1578
1579         err = -ENOMEM;
1580         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1581         if (dp == NULL)
1582                 goto err_free_reply;
1583
1584         ovs_dp_set_net(dp, sock_net(skb->sk));
1585
1586         /* Allocate table. */
1587         err = ovs_flow_tbl_init(&dp->table);
1588         if (err)
1589                 goto err_free_dp;
1590
1591         dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1592         if (!dp->stats_percpu) {
1593                 err = -ENOMEM;
1594                 goto err_destroy_table;
1595         }
1596
1597         dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
1598                                   sizeof(struct hlist_head), GFP_KERNEL);
1599         if (!dp->ports) {
1600                 err = -ENOMEM;
1601                 goto err_destroy_percpu;
1602         }
1603
1604         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1605                 INIT_HLIST_HEAD(&dp->ports[i]);
1606
1607         /* Set up our datapath device. */
1608         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1609         parms.type = OVS_VPORT_TYPE_INTERNAL;
1610         parms.options = NULL;
1611         parms.dp = dp;
1612         parms.port_no = OVSP_LOCAL;
1613         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1614
1615         ovs_dp_change(dp, a);
1616
1617         /* So far only local changes have been made, now need the lock. */
1618         ovs_lock();
1619
1620         vport = new_vport(&parms);
1621         if (IS_ERR(vport)) {
1622                 err = PTR_ERR(vport);
1623                 if (err == -EBUSY)
1624                         err = -EEXIST;
1625
1626                 if (err == -EEXIST) {
1627                         /* An outdated user space instance that does not understand
1628                          * the concept of user_features has attempted to create a new
1629                          * datapath and is likely to reuse it. Drop all user features.
1630                          */
1631                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1632                                 ovs_dp_reset_user_features(skb, info);
1633                 }
1634
1635                 goto err_destroy_ports_array;
1636         }
1637
1638         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1639                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1640         BUG_ON(err < 0);
1641
1642         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1643         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1644
1645         ovs_unlock();
1646
1647         ovs_notify(&dp_datapath_genl_family, reply, info);
1648         return 0;
1649
1650 err_destroy_ports_array:
1651         ovs_unlock();
1652         kfree(dp->ports);
1653 err_destroy_percpu:
1654         free_percpu(dp->stats_percpu);
1655 err_destroy_table:
1656         ovs_flow_tbl_destroy(&dp->table);
1657 err_free_dp:
1658         kfree(dp);
1659 err_free_reply:
1660         kfree_skb(reply);
1661 err:
1662         return err;
1663 }
1664
1665 /* Called with ovs_mutex. */
1666 static void __dp_destroy(struct datapath *dp)
1667 {
1668         int i;
1669
1670         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1671                 struct vport *vport;
1672                 struct hlist_node *n;
1673
1674                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1675                         if (vport->port_no != OVSP_LOCAL)
1676                                 ovs_dp_detach_port(vport);
1677         }
1678
1679         list_del_rcu(&dp->list_node);
1680
1681         /* OVSP_LOCAL is the datapath's internal port. All ports in the
1682          * datapath must be destroyed before the datapath itself is freed.
1683          */
1684         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1685
1686         /* Destroy the flow table and the datapath after an RCU grace period. */
1687         call_rcu(&dp->rcu, destroy_dp_rcu);
1688 }
1689
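/* OVS_DP_CMD_DEL: tear down a datapath and all of its ports, then notify
 * listeners.
 */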
1690 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1691 {
1692         struct sk_buff *reply;
1693         struct datapath *dp;
1694         int err;
1695
1696         reply = ovs_dp_cmd_alloc_info();
1697         if (!reply)
1698                 return -ENOMEM;
1699
1700         ovs_lock();
1701         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1702         err = PTR_ERR(dp);
1703         if (IS_ERR(dp))
1704                 goto err_unlock_free;
1705
1706         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1707                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1708         BUG_ON(err < 0);
1709
1710         __dp_destroy(dp);
1711         ovs_unlock();
1712
1713         ovs_notify(&dp_datapath_genl_family, reply, info);
1714
1715         return 0;
1716
1717 err_unlock_free:
1718         ovs_unlock();
1719         kfree_skb(reply);
1720         return err;
1721 }
1722
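/* OVS_DP_CMD_SET: update the datapath's user features and notify listeners. */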
1723 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1724 {
1725         struct sk_buff *reply;
1726         struct datapath *dp;
1727         int err;
1728
1729         reply = ovs_dp_cmd_alloc_info();
1730         if (!reply)
1731                 return -ENOMEM;
1732
1733         ovs_lock();
1734         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1735         err = PTR_ERR(dp);
1736         if (IS_ERR(dp))
1737                 goto err_unlock_free;
1738
1739         ovs_dp_change(dp, info->attrs);
1740
1741         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1742                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1743         BUG_ON(err < 0);
1744
1745         ovs_unlock();
1746         ovs_notify(&dp_datapath_genl_family, reply, info);
1747
1748         return 0;
1749
1750 err_unlock_free:
1751         ovs_unlock();
1752         kfree_skb(reply);
1753         return err;
1754 }
1755
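/* OVS_DP_CMD_GET: look up a datapath and unicast its description to the
 * requester.
 */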
1756 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1757 {
1758         struct sk_buff *reply;
1759         struct datapath *dp;
1760         int err;
1761
1762         reply = ovs_dp_cmd_alloc_info();
1763         if (!reply)
1764                 return -ENOMEM;
1765
1766         ovs_lock();
1767         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1768         if (IS_ERR(dp)) {
1769                 err = PTR_ERR(dp);
1770                 goto err_unlock_free;
1771         }
1772         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1773                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1774         BUG_ON(err < 0);
1775         ovs_unlock();
1776
1777         return genlmsg_reply(reply, info);
1778
1779 err_unlock_free:
1780         ovs_unlock();
1781         kfree_skb(reply);
1782         return err;
1783 }
1784
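/* Dump all datapaths in the caller's network namespace, resuming from the
 * index saved in cb->args[0].
 */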
1785 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1786 {
1787         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1788         struct datapath *dp;
1789         int skip = cb->args[0];
1790         int i = 0;
1791
1792         ovs_lock();
1793         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1794                 if (i >= skip &&
1795                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1796                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1797                                          OVS_DP_CMD_NEW) < 0)
1798                         break;
1799                 i++;
1800         }
1801         ovs_unlock();
1802
1803         cb->args[0] = i;
1804
1805         return skb->len;
1806 }
1807
1808 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1809         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1810         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1811         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1812 };
1813
1814 static const struct genl_ops dp_datapath_genl_ops[] = {
1815         { .cmd = OVS_DP_CMD_NEW,
1816           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1817           .policy = datapath_policy,
1818           .doit = ovs_dp_cmd_new
1819         },
1820         { .cmd = OVS_DP_CMD_DEL,
1821           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1822           .policy = datapath_policy,
1823           .doit = ovs_dp_cmd_del
1824         },
1825         { .cmd = OVS_DP_CMD_GET,
1826           .flags = 0,               /* OK for unprivileged users. */
1827           .policy = datapath_policy,
1828           .doit = ovs_dp_cmd_get,
1829           .dumpit = ovs_dp_cmd_dump
1830         },
1831         { .cmd = OVS_DP_CMD_SET,
1832           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1833           .policy = datapath_policy,
1834           .doit = ovs_dp_cmd_set,
1835         },
1836 };
1837
1838 static struct genl_family dp_datapath_genl_family = {
1839         .id = GENL_ID_GENERATE,
1840         .hdrsize = sizeof(struct ovs_header),
1841         .name = OVS_DATAPATH_FAMILY,
1842         .version = OVS_DATAPATH_VERSION,
1843         .maxattr = OVS_DP_ATTR_MAX,
1844         .netnsok = true,
1845         .parallel_ops = true,
1846         .ops = dp_datapath_genl_ops,
1847         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1848         .mcgrps = &ovs_dp_datapath_multicast_group,
1849         .n_mcgrps = 1,
1850 };
1851
1852 /* Called with ovs_mutex or RCU read lock. */
1853 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1854                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1855 {
1856         struct ovs_header *ovs_header;
1857         struct ovs_vport_stats vport_stats;
1858         int err;
1859
1860         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1861                                  flags, cmd);
1862         if (!ovs_header)
1863                 return -EMSGSIZE;
1864
1865         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1866
1867         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1868             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1869             nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1870                            ovs_vport_name(vport)))
1871                 goto nla_put_failure;
1872
1873         ovs_vport_get_stats(vport, &vport_stats);
1874         if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
1875                           sizeof(struct ovs_vport_stats), &vport_stats,
1876                           OVS_VPORT_ATTR_PAD))
1877                 goto nla_put_failure;
1878
1879         if (ovs_vport_get_upcall_portids(vport, skb))
1880                 goto nla_put_failure;
1881
1882         err = ovs_vport_get_options(vport, skb);
1883         if (err == -EMSGSIZE)
1884                 goto error;
1885
1886         genlmsg_end(skb, ovs_header);
1887         return 0;
1888
1889 nla_put_failure:
1890         err = -EMSGSIZE;
1891 error:
1892         genlmsg_cancel(skb, ovs_header);
1893         return err;
1894 }
1895
1896 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1897 {
1898         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1899 }
1900
1901 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1902 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1903                                          u32 seq, u8 cmd)
1904 {
1905         struct sk_buff *skb;
1906         int retval;
1907
1908         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1909         if (!skb)
1910                 return ERR_PTR(-ENOMEM);
1911
1912         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1913         BUG_ON(retval < 0);
1914
1915         return skb;
1916 }
1917
1918 /* Called with ovs_mutex or RCU read lock. */
1919 static struct vport *lookup_vport(struct net *net,
1920                                   const struct ovs_header *ovs_header,
1921                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1922 {
1923         struct datapath *dp;
1924         struct vport *vport;
1925
1926         if (a[OVS_VPORT_ATTR_NAME]) {
1927                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1928                 if (!vport)
1929                         return ERR_PTR(-ENODEV);
1930                 if (ovs_header->dp_ifindex &&
1931                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1932                         return ERR_PTR(-ENODEV);
1933                 return vport;
1934         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1935                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1936
1937                 if (port_no >= DP_MAX_PORTS)
1938                         return ERR_PTR(-EFBIG);
1939
1940                 dp = get_dp(net, ovs_header->dp_ifindex);
1941                 if (!dp)
1942                         return ERR_PTR(-ENODEV);
1943
1944                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1945                 if (!vport)
1946                         return ERR_PTR(-ENODEV);
1947                 return vport;
1948         } else
1949                 return ERR_PTR(-EINVAL);
1950 }
1951
1952 /* Called with ovs_mutex. */
1953 static void update_headroom(struct datapath *dp)
1954 {
1955         unsigned int dev_headroom, max_headroom = 0;
1956         struct net_device *dev;
1957         struct vport *vport;
1958         int i;
1959
1960         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1961                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1962                         dev = vport->dev;
1963                         dev_headroom = netdev_get_fwd_headroom(dev);
1964                         if (dev_headroom > max_headroom)
1965                                 max_headroom = dev_headroom;
1966                 }
1967         }
1968
1969         dp->max_headroom = max_headroom;
1970         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1971                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
1972                         netdev_set_rx_headroom(vport->dev, max_headroom);
1973 }
1974
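/* OVS_VPORT_CMD_NEW: validate or pick a free port number, create the vport,
 * adjust the datapath headroom if needed and notify listeners.
 */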
1975 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1976 {
1977         struct nlattr **a = info->attrs;
1978         struct ovs_header *ovs_header = info->userhdr;
1979         struct vport_parms parms;
1980         struct sk_buff *reply;
1981         struct vport *vport;
1982         struct datapath *dp;
1983         u32 port_no;
1984         int err;
1985
1986         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1987             !a[OVS_VPORT_ATTR_UPCALL_PID])
1988                 return -EINVAL;
1989
1990         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1991                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1992         if (port_no >= DP_MAX_PORTS)
1993                 return -EFBIG;
1994
1995         reply = ovs_vport_cmd_alloc_info();
1996         if (!reply)
1997                 return -ENOMEM;
1998
1999         ovs_lock();
2000 restart:
2001         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
2002         err = -ENODEV;
2003         if (!dp)
2004                 goto exit_unlock_free;
2005
2006         if (port_no) {
2007                 vport = ovs_vport_ovsl(dp, port_no);
2008                 err = -EBUSY;
2009                 if (vport)
2010                         goto exit_unlock_free;
2011         } else {
2012                 for (port_no = 1; ; port_no++) {
2013                         if (port_no >= DP_MAX_PORTS) {
2014                                 err = -EFBIG;
2015                                 goto exit_unlock_free;
2016                         }
2017                         vport = ovs_vport_ovsl(dp, port_no);
2018                         if (!vport)
2019                                 break;
2020                 }
2021         }
2022
2023         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
2024         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
2025         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
2026         parms.dp = dp;
2027         parms.port_no = port_no;
2028         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
2029
2030         vport = new_vport(&parms);
2031         err = PTR_ERR(vport);
2032         if (IS_ERR(vport)) {
2033                 if (err == -EAGAIN)
2034                         goto restart;
2035                 goto exit_unlock_free;
2036         }
2037
2038         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2039                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2040
2041         if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
2042                 update_headroom(dp);
2043         else
2044                 netdev_set_rx_headroom(vport->dev, dp->max_headroom);
2045
2046         BUG_ON(err < 0);
2047         ovs_unlock();
2048
2049         ovs_notify(&dp_vport_genl_family, reply, info);
2050         return 0;
2051
2052 exit_unlock_free:
2053         ovs_unlock();
2054         kfree_skb(reply);
2055         return err;
2056 }
2057
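/* OVS_VPORT_CMD_SET: update the options and/or upcall port IDs of an existing
 * vport; the vport type cannot be changed.
 */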
2058 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
2059 {
2060         struct nlattr **a = info->attrs;
2061         struct sk_buff *reply;
2062         struct vport *vport;
2063         int err;
2064
2065         reply = ovs_vport_cmd_alloc_info();
2066         if (!reply)
2067                 return -ENOMEM;
2068
2069         ovs_lock();
2070         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2071         err = PTR_ERR(vport);
2072         if (IS_ERR(vport))
2073                 goto exit_unlock_free;
2074
2075         if (a[OVS_VPORT_ATTR_TYPE] &&
2076             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
2077                 err = -EINVAL;
2078                 goto exit_unlock_free;
2079         }
2080
2081         if (a[OVS_VPORT_ATTR_OPTIONS]) {
2082                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
2083                 if (err)
2084                         goto exit_unlock_free;
2085         }
2086
2087
2088         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
2089                 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
2090
2091                 err = ovs_vport_set_upcall_portids(vport, ids);
2092                 if (err)
2093                         goto exit_unlock_free;
2094         }
2095
2096         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2097                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2098         BUG_ON(err < 0);
2099
2100         ovs_unlock();
2101         ovs_notify(&dp_vport_genl_family, reply, info);
2102         return 0;
2103
2104 exit_unlock_free:
2105         ovs_unlock();
2106         kfree_skb(reply);
2107         return err;
2108 }
2109
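/* OVS_VPORT_CMD_DEL: detach a non-local vport, recomputing the datapath
 * headroom if this vport supplied the current maximum.
 */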
2110 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
2111 {
2112         bool must_update_headroom = false;
2113         struct nlattr **a = info->attrs;
2114         struct sk_buff *reply;
2115         struct datapath *dp;
2116         struct vport *vport;
2117         int err;
2118
2119         reply = ovs_vport_cmd_alloc_info();
2120         if (!reply)
2121                 return -ENOMEM;
2122
2123         ovs_lock();
2124         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
2125         err = PTR_ERR(vport);
2126         if (IS_ERR(vport))
2127                 goto exit_unlock_free;
2128
2129         if (vport->port_no == OVSP_LOCAL) {
2130                 err = -EINVAL;
2131                 goto exit_unlock_free;
2132         }
2133
2134         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2135                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
2136         BUG_ON(err < 0);
2137
2138         /* The vport deletion may trigger a datapath headroom update. */
2139         dp = vport->dp;
2140         if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
2141                 must_update_headroom = true;
2142         netdev_reset_rx_headroom(vport->dev);
2143         ovs_dp_detach_port(vport);
2144
2145         if (must_update_headroom)
2146                 update_headroom(dp);
2147         ovs_unlock();
2148
2149         ovs_notify(&dp_vport_genl_family, reply, info);
2150         return 0;
2151
2152 exit_unlock_free:
2153         ovs_unlock();
2154         kfree_skb(reply);
2155         return err;
2156 }
2157
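/* OVS_VPORT_CMD_GET: look up a vport under RCU and unicast its description to
 * the requester.
 */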
2158 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
2159 {
2160         struct nlattr **a = info->attrs;
2161         struct ovs_header *ovs_header = info->userhdr;
2162         struct sk_buff *reply;
2163         struct vport *vport;
2164         int err;
2165
2166         reply = ovs_vport_cmd_alloc_info();
2167         if (!reply)
2168                 return -ENOMEM;
2169
2170         rcu_read_lock();
2171         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
2172         err = PTR_ERR(vport);
2173         if (IS_ERR(vport))
2174                 goto exit_unlock_free;
2175         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
2176                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
2177         BUG_ON(err < 0);
2178         rcu_read_unlock();
2179
2180         return genlmsg_reply(reply, info);
2181
2182 exit_unlock_free:
2183         rcu_read_unlock();
2184         kfree_skb(reply);
2185         return err;
2186 }
2187
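/* Dump all vports of one datapath, resuming from the hash bucket and
 * in-bucket offset saved in cb->args[].
 */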
2188 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
2189 {
2190         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
2191         struct datapath *dp;
2192         int bucket = cb->args[0], skip = cb->args[1];
2193         int i, j = 0;
2194
2195         rcu_read_lock();
2196         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
2197         if (!dp) {
2198                 rcu_read_unlock();
2199                 return -ENODEV;
2200         }
2201         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2202                 struct vport *vport;
2203
2204                 j = 0;
2205                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2206                         if (j >= skip &&
2207                             ovs_vport_cmd_fill_info(vport, skb,
2208                                                     NETLINK_CB(cb->skb).portid,
2209                                                     cb->nlh->nlmsg_seq,
2210                                                     NLM_F_MULTI,
2211                                                     OVS_VPORT_CMD_NEW) < 0)
2212                                 goto out;
2213
2214                         j++;
2215                 }
2216                 skip = 0;
2217         }
2218 out:
2219         rcu_read_unlock();
2220
2221         cb->args[0] = i;
2222         cb->args[1] = j;
2223
2224         return skb->len;
2225 }
2226
2227 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2228         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2229         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2230         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2231         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2232         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
2233         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2234 };
2235
2236 static const struct genl_ops dp_vport_genl_ops[] = {
2237         { .cmd = OVS_VPORT_CMD_NEW,
2238           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2239           .policy = vport_policy,
2240           .doit = ovs_vport_cmd_new
2241         },
2242         { .cmd = OVS_VPORT_CMD_DEL,
2243           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2244           .policy = vport_policy,
2245           .doit = ovs_vport_cmd_del
2246         },
2247         { .cmd = OVS_VPORT_CMD_GET,
2248           .flags = 0,               /* OK for unprivileged users. */
2249           .policy = vport_policy,
2250           .doit = ovs_vport_cmd_get,
2251           .dumpit = ovs_vport_cmd_dump
2252         },
2253         { .cmd = OVS_VPORT_CMD_SET,
2254           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2255           .policy = vport_policy,
2256           .doit = ovs_vport_cmd_set,
2257         },
2258 };
2259
2260 struct genl_family dp_vport_genl_family = {
2261         .id = GENL_ID_GENERATE,
2262         .hdrsize = sizeof(struct ovs_header),
2263         .name = OVS_VPORT_FAMILY,
2264         .version = OVS_VPORT_VERSION,
2265         .maxattr = OVS_VPORT_ATTR_MAX,
2266         .netnsok = true,
2267         .parallel_ops = true,
2268         .ops = dp_vport_genl_ops,
2269         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2270         .mcgrps = &ovs_dp_vport_multicast_group,
2271         .n_mcgrps = 1,
2272 };
2273
2274 static struct genl_family * const dp_genl_families[] = {
2275         &dp_datapath_genl_family,
2276         &dp_vport_genl_family,
2277         &dp_flow_genl_family,
2278         &dp_packet_genl_family,
2279 };
2280
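/* Unregister the first n_families entries of dp_genl_families. */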
2281 static void dp_unregister_genl(int n_families)
2282 {
2283         int i;
2284
2285         for (i = 0; i < n_families; i++)
2286                 genl_unregister_family(dp_genl_families[i]);
2287 }
2288
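/* Register all OVS generic netlink families, unwinding on failure. */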
2289 static int dp_register_genl(void)
2290 {
2291         int err;
2292         int i;
2293
2294         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2295
2296                 err = genl_register_family(dp_genl_families[i]);
2297                 if (err)
2298                         goto error;
2299         }
2300
2301         return 0;
2302
2303 error:
2304         dp_unregister_genl(i);
2305         return err;
2306 }
2307
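/* Per-netns init: set up the datapath list, the notifier work item and
 * conntrack state.
 */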
2308 static int __net_init ovs_init_net(struct net *net)
2309 {
2310         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2311
2312         INIT_LIST_HEAD(&ovs_net->dps);
2313         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2314         ovs_ct_init(net);
2315         return 0;
2316 }
2317
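/* Collect on @head every internal vport in @net whose underlying device lives
 * in the exiting namespace @dnet.
 */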
2318 static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
2319                                             struct list_head *head)
2320 {
2321         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2322         struct datapath *dp;
2323
2324         list_for_each_entry(dp, &ovs_net->dps, list_node) {
2325                 int i;
2326
2327                 for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
2328                         struct vport *vport;
2329
2330                         hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
2331                                 if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
2332                                         continue;
2333
2334                                 if (dev_net(vport->dev) == dnet)
2335                                         list_add(&vport->detach_list, head);
2336                         }
2337                 }
2338         }
2339 }
2340
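/* Per-netns exit: destroy all datapaths in the namespace, detach internal
 * vports in other namespaces whose devices live here, and flush the pending
 * notifier work.
 */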
2341 static void __net_exit ovs_exit_net(struct net *dnet)
2342 {
2343         struct datapath *dp, *dp_next;
2344         struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
2345         struct vport *vport, *vport_next;
2346         struct net *net;
2347         LIST_HEAD(head);
2348
2349         ovs_ct_exit(dnet);
2350         ovs_lock();
2351         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2352                 __dp_destroy(dp);
2353
2354         rtnl_lock();
2355         for_each_net(net)
2356                 list_vports_from_net(net, dnet, &head);
2357         rtnl_unlock();
2358
2359         /* Detach all vports from the given namespace. */
2360         list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
2361                 list_del(&vport->detach_list);
2362                 ovs_dp_detach_port(vport);
2363         }
2364
2365         ovs_unlock();
2366
2367         cancel_work_sync(&ovs_net->dp_notify_work);
2368 }
2369
2370 static struct pernet_operations ovs_net_ops = {
2371         .init = ovs_init_net,
2372         .exit = ovs_exit_net,
2373         .id   = &ovs_net_id,
2374         .size = sizeof(struct ovs_net),
2375 };
2376
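/* Module init: register the action FIFOs, rtnl link ops for internal devices,
 * the flow and vport subsystems, pernet operations, the netdevice notifier,
 * netdev vport support and the generic netlink families, unwinding in reverse
 * order on failure.
 */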
2377 static int __init dp_init(void)
2378 {
2379         int err;
2380
2381         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2382
2383         pr_info("Open vSwitch switching datapath\n");
2384
2385         err = action_fifos_init();
2386         if (err)
2387                 goto error;
2388
2389         err = ovs_internal_dev_rtnl_link_register();
2390         if (err)
2391                 goto error_action_fifos_exit;
2392
2393         err = ovs_flow_init();
2394         if (err)
2395                 goto error_unreg_rtnl_link;
2396
2397         err = ovs_vport_init();
2398         if (err)
2399                 goto error_flow_exit;
2400
2401         err = register_pernet_device(&ovs_net_ops);
2402         if (err)
2403                 goto error_vport_exit;
2404
2405         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2406         if (err)
2407                 goto error_netns_exit;
2408
2409         err = ovs_netdev_init();
2410         if (err)
2411                 goto error_unreg_notifier;
2412
2413         err = dp_register_genl();
2414         if (err < 0)
2415                 goto error_unreg_netdev;
2416
2417         return 0;
2418
2419 error_unreg_netdev:
2420         ovs_netdev_exit();
2421 error_unreg_notifier:
2422         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2423 error_netns_exit:
2424         unregister_pernet_device(&ovs_net_ops);
2425 error_vport_exit:
2426         ovs_vport_exit();
2427 error_flow_exit:
2428         ovs_flow_exit();
2429 error_unreg_rtnl_link:
2430         ovs_internal_dev_rtnl_link_unregister();
2431 error_action_fifos_exit:
2432         action_fifos_exit();
2433 error:
2434         return err;
2435 }
2436
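/* Module exit: undo dp_init() in reverse order, waiting for outstanding RCU
 * callbacks before the vport and flow subsystems are torn down.
 */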
2437 static void dp_cleanup(void)
2438 {
2439         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2440         ovs_netdev_exit();
2441         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2442         unregister_pernet_device(&ovs_net_ops);
2443         rcu_barrier();
2444         ovs_vport_exit();
2445         ovs_flow_exit();
2446         ovs_internal_dev_rtnl_link_unregister();
2447         action_fifos_exit();
2448 }
2449
2450 module_init(dp_init);
2451 module_exit(dp_cleanup);
2452
2453 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2454 MODULE_LICENSE("GPL");
2455 MODULE_ALIAS_GENL_FAMILY(OVS_DATAPATH_FAMILY);
2456 MODULE_ALIAS_GENL_FAMILY(OVS_VPORT_FAMILY);
2457 MODULE_ALIAS_GENL_FAMILY(OVS_FLOW_FAMILY);
2458 MODULE_ALIAS_GENL_FAMILY(OVS_PACKET_FAMILY);