GNU Linux-libre 4.4.285-gnu1
[releases.git] / drivers / net / ethernet / rocker / rocker.c
1 /*
2  * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3  * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4  * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
38 #include <net/arp.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
41
42 #include "rocker.h"
43
/* Driver name used for PCI registration and IRQ naming */
static const char rocker_driver_name[] = "rocker";

/* Rocker is a QEMU-emulated switch device on Red Hat's PCI vendor id */
static const struct pci_device_id rocker_pci_id_table[] = {
	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
	{0, }
};
50
/* Lookup key for an OF-DPA flow table entry.  @tbl_id selects which
 * union arm is meaningful; the key is hashed via a CRC over key_len
 * bytes (see rocker_flow_tbl_entry), so unused union bytes must stay
 * zeroed for lookups to match.
 */
struct rocker_flow_tbl_key {
	u32 priority;			/* higher priority match wins */
	enum rocker_of_dpa_table_id tbl_id;
	union {
		struct {		/* ingress port table */
			u32 in_pport;
			u32 in_pport_mask;
			enum rocker_of_dpa_table_id goto_tbl;
		} ig_port;
		struct {		/* VLAN table */
			u32 in_pport;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool untagged;
			__be16 new_vlan_id;
		} vlan;
		struct {		/* termination MAC table */
			u32 in_pport;
			u32 in_pport_mask;
			__be16 eth_type;
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 vlan_id;
			__be16 vlan_id_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			bool copy_to_cpu;
		} term_mac;
		struct {		/* unicast (IPv4) routing table */
			__be16 eth_type;
			__be32 dst4;
			__be32 dst4_mask;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
		} ucast_routing;
		struct {		/* bridging table */
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			int has_eth_dst;
			int has_eth_dst_mask;
			__be16 vlan_id;
			u32 tunnel_id;
			enum rocker_of_dpa_table_id goto_tbl;
			u32 group_id;
			bool copy_to_cpu;
		} bridge;
		struct {		/* ACL policy table */
			u32 in_pport;
			u32 in_pport_mask;
			u8 eth_src[ETH_ALEN];
			u8 eth_src_mask[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			u8 eth_dst_mask[ETH_ALEN];
			__be16 eth_type;
			__be16 vlan_id;
			__be16 vlan_id_mask;
			u8 ip_proto;
			u8 ip_proto_mask;
			u8 ip_tos;
			u8 ip_tos_mask;
			u32 group_id;
		} acl;
	};
};
115
/* One flow table entry as mirrored in rocker->flow_tbl, hashed by
 * key_crc32.
 */
struct rocker_flow_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u64 cookie;		/* unique id identifying the entry to the device */
	struct rocker_flow_tbl_key key;
	size_t key_len;		/* bytes of key in use for this tbl_id */
	u32 key_crc32; /* key */
};
124
/* OF-DPA group table entry as mirrored in rocker->group_tbl, hashed by
 * group_id.  group_ids[] holds group_count member group ids; which union
 * arm applies depends on the group type (encoding not visible in this
 * chunk — see rocker.h).
 */
struct rocker_group_tbl_entry {
	struct hlist_node entry;
	u32 cmd;
	u32 group_id; /* key */
	u16 group_count;
	u32 *group_ids;
	union {
		struct {
			u8 pop_vlan;
		} l2_interface;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			u32 group_id;
		} l2_rewrite;
		struct {
			u8 eth_src[ETH_ALEN];
			u8 eth_dst[ETH_ALEN];
			__be16 vlan_id;
			bool ttl_check;
			u32 group_id;
		} l3_unicast;
	};
};
150
/* Forwarding database (MAC) entry, hashed by key_crc32 in
 * rocker->fdb_tbl.
 */
struct rocker_fdb_tbl_entry {
	struct hlist_node entry;
	u32 key_crc32; /* key */
	bool learned;		/* dynamically learned (vs. user-installed) */
	unsigned long touched;	/* last-activity time; ageing handled elsewhere
				 * (see rocker->fdb_cleanup_timer)
				 */
	struct rocker_fdb_tbl_key {
		struct rocker_port *rocker_port;
		u8 addr[ETH_ALEN];
		__be16 vlan_id;
	} key;
};
162
/* Refcounted mapping from an ifindex to its allocated internal VLAN id */
struct rocker_internal_vlan_tbl_entry {
	struct hlist_node entry;
	int ifindex; /* key */
	u32 ref_count;
	__be16 vlan_id;
};
169
/* IPv4 neighbour (next hop) entry, hashed by ip_addr in
 * rocker->neigh_tbl.
 */
struct rocker_neigh_tbl_entry {
	struct hlist_node entry;
	__be32 ip_addr; /* key */
	struct net_device *dev;
	u32 ref_count;
	u32 index;
	u8 eth_dst[ETH_ALEN];
	bool ttl_check;
};
179
/* Host-side bookkeeping for one DMA descriptor */
struct rocker_desc_info {
	char *data; /* mapped */
	size_t data_size;	/* capacity of the data buffer */
	size_t tlv_size;	/* bytes of TLVs written into data so far */
	struct rocker_desc *desc;
	dma_addr_t mapaddr;
};
187
/* One DMA descriptor ring (used for cmd, event, tx and rx rings) */
struct rocker_dma_ring_info {
	size_t size;
	u32 head;
	u32 tail;
	struct rocker_desc *desc; /* mapped */
	dma_addr_t mapaddr;
	struct rocker_desc_info *desc_info;
	unsigned int type;
};
197
198 struct rocker;
199
/* Kinds of control traffic; used to index rocker_port->ctrls[] */
enum {
	ROCKER_CTRL_LINK_LOCAL_MCAST,
	ROCKER_CTRL_LOCAL_ARP,
	ROCKER_CTRL_IPV4_MCAST,
	ROCKER_CTRL_IPV6_MCAST,
	ROCKER_CTRL_DFLT_BRIDGING,
	ROCKER_CTRL_DFLT_OVS,
	ROCKER_CTRL_MAX,	/* array size marker — keep last */
};
209
/* Internal VLAN ids are drawn from a 255-id window starting at 0x0f00;
 * one may be assigned per ifindex (see rocker_internal_vlan_tbl_entry).
 */
#define ROCKER_INTERNAL_VLAN_ID_BASE    0x0f00
#define ROCKER_N_INTERNAL_VLANS         255
#define ROCKER_VLAN_BITMAP_LEN          BITS_TO_LONGS(VLAN_N_VID)
#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
214
/* Per-port state; one instance per switch port */
struct rocker_port {
	struct net_device *dev;
	struct net_device *bridge_dev;	/* bridge/OVS master, if enslaved */
	struct rocker *rocker;		/* owning device */
	unsigned int port_number;
	u32 pport;
	__be16 internal_vlan_id;	/* VLAN substituted for untagged traffic
					 * (see rocker_port_vid_to_vlan)
					 */
	int stp_state;
	u32 brport_flags;
	unsigned long ageing_time;
	bool ctrls[ROCKER_CTRL_MAX];	/* per-ROCKER_CTRL_* enable flags */
	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
	struct napi_struct napi_tx;
	struct napi_struct napi_rx;
	struct rocker_dma_ring_info tx_ring;
	struct rocker_dma_ring_info rx_ring;
};
232
/* Per-PCI-device driver state.  Each software table mirror is protected
 * by its own spinlock, noted alongside.
 */
struct rocker {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;			/* MMIO register mapping */
	struct msix_entry *msix_entries;
	unsigned int port_count;
	struct rocker_port **ports;
	struct {
		u64 id;
	} hw;
	unsigned long ageing_time;
	spinlock_t cmd_ring_lock;		/* for cmd ring accesses */
	struct rocker_dma_ring_info cmd_ring;
	struct rocker_dma_ring_info event_ring;
	DECLARE_HASHTABLE(flow_tbl, 16);
	spinlock_t flow_tbl_lock;		/* for flow tbl accesses */
	u64 flow_tbl_next_cookie;
	DECLARE_HASHTABLE(group_tbl, 16);
	spinlock_t group_tbl_lock;		/* for group tbl accesses */
	struct timer_list fdb_cleanup_timer;
	DECLARE_HASHTABLE(fdb_tbl, 16);
	spinlock_t fdb_tbl_lock;		/* for fdb tbl accesses */
	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
	spinlock_t internal_vlan_tbl_lock;	/* for vlan tbl accesses */
	DECLARE_HASHTABLE(neigh_tbl, 16);
	spinlock_t neigh_tbl_lock;		/* for neigh tbl accesses */
	u32 neigh_tbl_next_index;
};
261
/* Well-known MAC addresses and masks used when matching control frames */
static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; /* IEEE 802 link-local base */
static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* multicast bit only */
static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; /* IPv4 mcast (01:00:5e) base */
static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; /* IPv6 mcast (33:33) base */
static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
271
272 /* Rocker priority levels for flow table entries.  Higher
273  * priority match takes precedence over lower priority match.
274  */
275
/* NOTE(review): equal numeric values across different tables appear
 * intentional — priorities only compete among entries of the same table.
 */
enum {
	ROCKER_PRIORITY_UNKNOWN = 0,
	ROCKER_PRIORITY_IG_PORT = 1,
	ROCKER_PRIORITY_VLAN = 1,
	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
	ROCKER_PRIORITY_ACL_CTRL = 3,
	ROCKER_PRIORITY_ACL_NORMAL = 2,
	ROCKER_PRIORITY_ACL_DFLT = 1,
};
292
293 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
294 {
295         u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
296         u16 end = 0xffe;
297         u16 _vlan_id = ntohs(vlan_id);
298
299         return (_vlan_id >= start && _vlan_id <= end);
300 }
301
302 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
303                                       u16 vid, bool *pop_vlan)
304 {
305         __be16 vlan_id;
306
307         if (pop_vlan)
308                 *pop_vlan = false;
309         vlan_id = htons(vid);
310         if (!vlan_id) {
311                 vlan_id = rocker_port->internal_vlan_id;
312                 if (pop_vlan)
313                         *pop_vlan = true;
314         }
315
316         return vlan_id;
317 }
318
319 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
320                                    __be16 vlan_id)
321 {
322         if (rocker_vlan_id_is_internal(vlan_id))
323                 return 0;
324
325         return ntohs(vlan_id);
326 }
327
328 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
329 {
330         return rocker_port->bridge_dev &&
331                netif_is_bridge_master(rocker_port->bridge_dev);
332 }
333
334 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
335 {
336         return rocker_port->bridge_dev &&
337                netif_is_ovs_master(rocker_port->bridge_dev);
338 }
339
/* Flag bits threaded through the driver's table-programming helpers */
#define ROCKER_OP_FLAG_REMOVE           BIT(0)
#define ROCKER_OP_FLAG_NOWAIT           BIT(1)	/* atomic context: GFP_ATOMIC allocs */
#define ROCKER_OP_FLAG_LEARNED          BIT(2)
#define ROCKER_OP_FLAG_REFRESH          BIT(3)
344
345 static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
346                                      struct switchdev_trans *trans, int flags,
347                                      size_t size)
348 {
349         struct switchdev_trans_item *elem = NULL;
350         gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
351                           GFP_ATOMIC : GFP_KERNEL;
352
353         /* If in transaction prepare phase, allocate the memory
354          * and enqueue it on a transaction.  If in transaction
355          * commit phase, dequeue the memory from the transaction
356          * rather than re-allocating the memory.  The idea is the
357          * driver code paths for prepare and commit are identical
358          * so the memory allocated in the prepare phase is the
359          * memory used in the commit phase.
360          */
361
362         if (!trans) {
363                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
364         } else if (switchdev_trans_ph_prepare(trans)) {
365                 elem = kzalloc(size + sizeof(*elem), gfp_flags);
366                 if (!elem)
367                         return NULL;
368                 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
369         } else {
370                 elem = switchdev_trans_item_dequeue(trans);
371         }
372
373         return elem ? elem + 1 : NULL;
374 }
375
/* kzalloc-style wrapper around __rocker_port_mem_alloc() */
static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 size_t size)
{
	return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
}
382
383 static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
384                                  struct switchdev_trans *trans, int flags,
385                                  size_t n, size_t size)
386 {
387         return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
388 }
389
390 static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
391 {
392         struct switchdev_trans_item *elem;
393
394         /* Frees are ignored if in transaction prepare phase.  The
395          * memory remains on the per-port list until freed in the
396          * commit phase.
397          */
398
399         if (switchdev_trans_ph_prepare(trans))
400                 return;
401
402         elem = (struct switchdev_trans_item *) mem - 1;
403         kfree(elem);
404 }
405
/* Completion-style object used to wait for a device interrupt */
struct rocker_wait {
	wait_queue_head_t wait;
	bool done;	/* set by rocker_wait_wake_up() */
	bool nowait;	/* NOTE(review): semantics not visible in this chunk */
};
411
412 static void rocker_wait_reset(struct rocker_wait *wait)
413 {
414         wait->done = false;
415         wait->nowait = false;
416 }
417
418 static void rocker_wait_init(struct rocker_wait *wait)
419 {
420         init_waitqueue_head(&wait->wait);
421         rocker_wait_reset(wait);
422 }
423
424 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
425                                               struct switchdev_trans *trans,
426                                               int flags)
427 {
428         struct rocker_wait *wait;
429
430         wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
431         if (!wait)
432                 return NULL;
433         rocker_wait_init(wait);
434         return wait;
435 }
436
/* Free a wait object from rocker_wait_create(); in a transaction's
 * prepare phase the free is deferred (see rocker_port_kfree()).
 */
static void rocker_wait_destroy(struct switchdev_trans *trans,
				struct rocker_wait *wait)
{
	rocker_port_kfree(trans, wait);
}
442
443 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
444                                       unsigned long timeout)
445 {
446         wait_event_timeout(wait->wait, wait->done, HZ / 10);
447         if (!wait->done)
448                 return false;
449         return true;
450 }
451
/* Mark the wait object done and wake any waiter.  done must be set
 * before the wake_up() so the woken task's condition check succeeds.
 */
static void rocker_wait_wake_up(struct rocker_wait *wait)
{
	wait->done = true;
	wake_up(&wait->wait);
}
457
458 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
459 {
460         return rocker->msix_entries[vector].vector;
461 }
462
463 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
464 {
465         return rocker_msix_vector(rocker_port->rocker,
466                                   ROCKER_MSIX_VEC_TX(rocker_port->port_number));
467 }
468
469 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
470 {
471         return rocker_msix_vector(rocker_port->rocker,
472                                   ROCKER_MSIX_VEC_RX(rocker_port->port_number));
473 }
474
/* MMIO register accessors; `reg` is the suffix of a ROCKER_* register
 * offset macro from rocker.h (token-pasted onto ROCKER_ below).
 */
#define rocker_write32(rocker, reg, val)	\
	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read32(rocker, reg)	\
	readl((rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_write64(rocker, reg, val)	\
	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
#define rocker_read64(rocker, reg)	\
	readq((rocker)->hw_addr + (ROCKER_ ## reg))
483
484 /*****************************
485  * HW basic testing functions
486  *****************************/
487
/* Sanity-check MMIO access: the device echoes back twice the value
 * written to the 32- and 64-bit test registers.  Returns 0 or -EIO.
 */
static int rocker_reg_test(const struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	u64 test_reg;
	u64 rnd;

	/* drop the top bit so rnd * 2 still fits in 32 bits */
	rnd = prandom_u32();
	rnd >>= 1;
	rocker_write32(rocker, TEST_REG, rnd);
	test_reg = rocker_read32(rocker, TEST_REG);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	/* build a 63-bit random value so doubling cannot overflow 64 bits */
	rnd = prandom_u32();
	rnd <<= 31;
	rnd |= prandom_u32();
	rocker_write64(rocker, TEST_REG64, rnd);
	test_reg = rocker_read64(rocker, TEST_REG64);
	if (test_reg != rnd * 2) {
		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
			test_reg, rnd * 2);
		return -EIO;
	}

	return 0;
}
517
518 static int rocker_dma_test_one(const struct rocker *rocker,
519                                struct rocker_wait *wait, u32 test_type,
520                                dma_addr_t dma_handle, const unsigned char *buf,
521                                const unsigned char *expect, size_t size)
522 {
523         const struct pci_dev *pdev = rocker->pdev;
524         int i;
525
526         rocker_wait_reset(wait);
527         rocker_write32(rocker, TEST_DMA_CTRL, test_type);
528
529         if (!rocker_wait_event_timeout(wait, HZ / 10)) {
530                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
531                 return -EIO;
532         }
533
534         for (i = 0; i < size; i++) {
535                 if (buf[i] != expect[i]) {
536                         dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
537                                 buf[i], i, expect[i]);
538                         return -EIO;
539                 }
540         }
541         return 0;
542 }
543
/* DMA self-test uses a four-page buffer filled with this byte pattern */
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
546
547 static int rocker_dma_test_offset(const struct rocker *rocker,
548                                   struct rocker_wait *wait, int offset)
549 {
550         struct pci_dev *pdev = rocker->pdev;
551         unsigned char *alloc;
552         unsigned char *buf;
553         unsigned char *expect;
554         dma_addr_t dma_handle;
555         int i;
556         int err;
557
558         alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
559                         GFP_KERNEL | GFP_DMA);
560         if (!alloc)
561                 return -ENOMEM;
562         buf = alloc + offset;
563         expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
564
565         dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
566                                     PCI_DMA_BIDIRECTIONAL);
567         if (pci_dma_mapping_error(pdev, dma_handle)) {
568                 err = -EIO;
569                 goto free_alloc;
570         }
571
572         rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
573         rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
574
575         memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
576         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
577                                   dma_handle, buf, expect,
578                                   ROCKER_TEST_DMA_BUF_SIZE);
579         if (err)
580                 goto unmap;
581
582         memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
583         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
584                                   dma_handle, buf, expect,
585                                   ROCKER_TEST_DMA_BUF_SIZE);
586         if (err)
587                 goto unmap;
588
589         prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
590         for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
591                 expect[i] = ~buf[i];
592         err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
593                                   dma_handle, buf, expect,
594                                   ROCKER_TEST_DMA_BUF_SIZE);
595         if (err)
596                 goto unmap;
597
598 unmap:
599         pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
600                          PCI_DMA_BIDIRECTIONAL);
601 free_alloc:
602         kfree(alloc);
603
604         return err;
605 }
606
/* Repeat the DMA self-test at each of the 8 byte offsets within one
 * alignment unit.  Returns the first failure, or 0.
 */
static int rocker_dma_test(const struct rocker *rocker,
			   struct rocker_wait *wait)
{
	int offset;

	for (offset = 0; offset < 8; offset++) {
		int err = rocker_dma_test_offset(rocker, wait, offset);

		if (err)
			return err;
	}
	return 0;
}
620
621 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
622 {
623         struct rocker_wait *wait = dev_id;
624
625         rocker_wait_wake_up(wait);
626
627         return IRQ_HANDLED;
628 }
629
630 static int rocker_basic_hw_test(const struct rocker *rocker)
631 {
632         const struct pci_dev *pdev = rocker->pdev;
633         struct rocker_wait wait;
634         int err;
635
636         err = rocker_reg_test(rocker);
637         if (err) {
638                 dev_err(&pdev->dev, "reg test failed\n");
639                 return err;
640         }
641
642         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
643                           rocker_test_irq_handler, 0,
644                           rocker_driver_name, &wait);
645         if (err) {
646                 dev_err(&pdev->dev, "cannot assign test irq\n");
647                 return err;
648         }
649
650         rocker_wait_init(&wait);
651         rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
652
653         if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
654                 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
655                 err = -EIO;
656                 goto free_irq;
657         }
658
659         err = rocker_dma_test(rocker, &wait);
660         if (err)
661                 dev_err(&pdev->dev, "dma test failed\n");
662
663 free_irq:
664         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
665         return err;
666 }
667
668 /******
669  * TLV
670  ******/
671
/* TLVs are packed on 8-byte boundaries; tlv->len covers header+payload
 * (see the layout diagram below).
 */
#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
676
677 /*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
678  * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
679  * |             Header          | Pad |           Payload           | Pad |
680  * |      (struct rocker_tlv)    | ing |                             | ing |
681  * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
682  *  <--------------------------- tlv->len -------------------------->
683  */
684
685 static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
686                                           int *remaining)
687 {
688         int totlen = ROCKER_TLV_ALIGN(tlv->len);
689
690         *remaining -= totlen;
691         return (struct rocker_tlv *) ((char *) tlv + totlen);
692 }
693
694 static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
695 {
696         return remaining >= (int) ROCKER_TLV_HDRLEN &&
697                tlv->len >= ROCKER_TLV_HDRLEN &&
698                tlv->len <= remaining;
699 }
700
/* Iterate over a run of TLVs starting at @head spanning @len bytes */
#define rocker_tlv_for_each(pos, head, len, rem)	\
	for (pos = head, rem = len;			\
	     rocker_tlv_ok(pos, rem);			\
	     pos = rocker_tlv_next(pos, &(rem)))

/* Iterate over the TLVs nested inside @tlv's payload */
#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
			    rocker_tlv_len(tlv), rem)
709
710 static int rocker_tlv_attr_size(int payload)
711 {
712         return ROCKER_TLV_HDRLEN + payload;
713 }
714
/* Bytes the attribute occupies on the wire, including trailing padding */
static int rocker_tlv_total_size(int payload)
{
	int unaligned = rocker_tlv_attr_size(payload);

	return ROCKER_TLV_ALIGN(unaligned);
}
719
/* Number of padding bytes that follow the payload */
static int rocker_tlv_padlen(int payload)
{
	int total = rocker_tlv_total_size(payload);

	return total - rocker_tlv_attr_size(payload);
}
724
/* Attribute type of @tlv */
static int rocker_tlv_type(const struct rocker_tlv *tlv)
{
	return tlv->type;
}
729
/* Pointer to @tlv's payload (just past the aligned header).
 * Note: intentionally strips const, matching the rest of the TLV API.
 */
static void *rocker_tlv_data(const struct rocker_tlv *tlv)
{
	return (char *) tlv + ROCKER_TLV_HDRLEN;
}
734
/* Payload length of @tlv (tlv->len minus the aligned header) */
static int rocker_tlv_len(const struct rocker_tlv *tlv)
{
	return tlv->len - ROCKER_TLV_HDRLEN;
}
739
740 static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
741 {
742         return *(u8 *) rocker_tlv_data(tlv);
743 }
744
745 static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
746 {
747         return *(u16 *) rocker_tlv_data(tlv);
748 }
749
750 static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
751 {
752         return *(__be16 *) rocker_tlv_data(tlv);
753 }
754
755 static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
756 {
757         return *(u32 *) rocker_tlv_data(tlv);
758 }
759
760 static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
761 {
762         return *(u64 *) rocker_tlv_data(tlv);
763 }
764
765 static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
766                              const char *buf, int buf_len)
767 {
768         const struct rocker_tlv *tlv;
769         const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
770         int rem;
771
772         memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
773
774         rocker_tlv_for_each(tlv, head, buf_len, rem) {
775                 u32 type = rocker_tlv_type(tlv);
776
777                 if (type > 0 && type <= maxtype)
778                         tb[type] = tlv;
779         }
780 }
781
/* Parse the TLVs nested inside @tlv's payload into @tb */
static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
				    const struct rocker_tlv *tlv)
{
	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
			 rocker_tlv_len(tlv));
}
788
/* Parse the top-level TLVs of a completed descriptor into @tb */
static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
				  const struct rocker_desc_info *desc_info)
{
	rocker_tlv_parse(tb, maxtype, desc_info->data,
			 desc_info->desc->tlv_size);
}
795
796 static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
797 {
798         return (struct rocker_tlv *) ((char *) desc_info->data +
799                                                desc_info->tlv_size);
800 }
801
802 static int rocker_tlv_put(struct rocker_desc_info *desc_info,
803                           int attrtype, int attrlen, const void *data)
804 {
805         int tail_room = desc_info->data_size - desc_info->tlv_size;
806         int total_size = rocker_tlv_total_size(attrlen);
807         struct rocker_tlv *tlv;
808
809         if (unlikely(tail_room < total_size))
810                 return -EMSGSIZE;
811
812         tlv = rocker_tlv_start(desc_info);
813         desc_info->tlv_size += total_size;
814         tlv->type = attrtype;
815         tlv->len = rocker_tlv_attr_size(attrlen);
816         memcpy(rocker_tlv_data(tlv), data, attrlen);
817         memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
818         return 0;
819 }
820
821 static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
822                              int attrtype, u8 value)
823 {
824         u8 tmp = value; /* work around GCC PR81715 */
825
826         return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
827 }
828
829 static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
830                               int attrtype, u16 value)
831 {
832         u16 tmp = value;
833
834         return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
835 }
836
837 static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
838                                int attrtype, __be16 value)
839 {
840         __be16 tmp = value;
841
842         return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
843 }
844
845 static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
846                               int attrtype, u32 value)
847 {
848         u32 tmp = value;
849
850         return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
851 }
852
853 static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
854                                int attrtype, __be32 value)
855 {
856         __be32 tmp = value;
857
858         return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
859 }
860
861 static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
862                               int attrtype, u64 value)
863 {
864         u64 tmp = value;
865
866         return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
867 }
868
869 static struct rocker_tlv *
870 rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
871 {
872         struct rocker_tlv *start = rocker_tlv_start(desc_info);
873
874         if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
875                 return NULL;
876
877         return start;
878 }
879
/* Close a nest: patch @start's len to cover everything appended since
 * rocker_tlv_nest_start().
 */
static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
				struct rocker_tlv *start)
{
	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
}
885
/* Abort a nest: roll the descriptor's TLV tail back to before @start,
 * discarding the partially-built nested attribute.
 */
static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
				   const struct rocker_tlv *start)
{
	desc_info->tlv_size = (const char *) start - desc_info->data;
}
891
892 /******************************************
893  * DMA rings and descriptors manipulations
894  ******************************************/
895
896 static u32 __pos_inc(u32 pos, size_t limit)
897 {
898         return ++pos == limit ? 0 : pos;
899 }
900
/* Translate the device's completion error code (comp_err with the
 * generation bit masked off) into a negative errno.  Unknown device
 * codes fall through to -EINVAL.
 */
static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;

	switch (err) {
	case ROCKER_OK:
		return 0;
	case -ROCKER_ENOENT:
		return -ENOENT;
	case -ROCKER_ENXIO:
		return -ENXIO;
	case -ROCKER_ENOMEM:
		return -ENOMEM;
	case -ROCKER_EEXIST:
		return -EEXIST;
	case -ROCKER_EINVAL:
		return -EINVAL;
	case -ROCKER_EMSGSIZE:
		return -EMSGSIZE;
	case -ROCKER_ENOTSUP:
		return -EOPNOTSUPP;
	case -ROCKER_ENOBUFS:
		return -ENOBUFS;
	}

	/* Device reported a code we don't know about. */
	return -EINVAL;
}
928
929 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
930 {
931         desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
932 }
933
934 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
935 {
936         u32 comp_err = desc_info->desc->comp_err;
937
938         return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
939 }
940
941 static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
942 {
943         return (void *)(uintptr_t)desc_info->desc->cookie;
944 }
945
946 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
947                                        void *ptr)
948 {
949         desc_info->desc->cookie = (uintptr_t) ptr;
950 }
951
952 static struct rocker_desc_info *
953 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
954 {
955         static struct rocker_desc_info *desc_info;
956         u32 head = __pos_inc(info->head, info->size);
957
958         desc_info = &info->desc_info[info->head];
959         if (head == info->tail)
960                 return NULL; /* ring full */
961         desc_info->tlv_size = 0;
962         return desc_info;
963 }
964
965 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
966 {
967         desc_info->desc->buf_size = desc_info->data_size;
968         desc_info->desc->tlv_size = desc_info->tlv_size;
969 }
970
/* Publish a filled descriptor to the device: commit its sizes, advance
 * the ring head, and write the new head to the device register.  The
 * commit must happen before the register write so the device never
 * sees a stale descriptor.
 */
static void rocker_desc_head_set(const struct rocker *rocker,
				 struct rocker_dma_ring_info *info,
				 const struct rocker_desc_info *desc_info)
{
	u32 head = __pos_inc(info->head, info->size);

	/* Caller must never publish into a full ring. */
	BUG_ON(head == info->tail);
	rocker_desc_commit(desc_info);
	info->head = head;
	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
}
982
983 static struct rocker_desc_info *
984 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
985 {
986         static struct rocker_desc_info *desc_info;
987
988         if (info->tail == info->head)
989                 return NULL; /* nothing to be done between head and tail */
990         desc_info = &info->desc_info[info->tail];
991         if (!rocker_desc_gen(desc_info))
992                 return NULL; /* gen bit not set, desc is not ready yet */
993         info->tail = __pos_inc(info->tail, info->size);
994         desc_info->tlv_size = desc_info->desc->tlv_size;
995         return desc_info;
996 }
997
998 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
999                                         const struct rocker_dma_ring_info *info,
1000                                         u32 credits)
1001 {
1002         if (credits)
1003                 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
1004 }
1005
1006 static unsigned long rocker_dma_ring_size_fix(size_t size)
1007 {
1008         return max(ROCKER_DMA_SIZE_MIN,
1009                    min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
1010 }
1011
/* Allocate a DMA descriptor ring of the given type/size and program it
 * into the device.  'size' must already be normalized by
 * rocker_dma_ring_size_fix().  Returns 0 or -ENOMEM.
 */
static int rocker_dma_ring_create(const struct rocker *rocker,
				  unsigned int type,
				  size_t size,
				  struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(size != rocker_dma_ring_size_fix(size));
	info->size = size;
	info->type = type;
	info->head = 0;
	info->tail = 0;
	/* CPU-side bookkeeping array, one entry per hardware descriptor. */
	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
				  GFP_KERNEL);
	if (!info->desc_info)
		return -ENOMEM;

	/* Coherent DMA memory for the descriptors the device reads/writes. */
	info->desc = pci_alloc_consistent(rocker->pdev,
					  info->size * sizeof(*info->desc),
					  &info->mapaddr);
	if (!info->desc) {
		kfree(info->desc_info);
		return -ENOMEM;
	}

	for (i = 0; i < info->size; i++)
		info->desc_info[i].desc = &info->desc[i];

	/* Reset the ring before exposing its address and size to hw. */
	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
		       ROCKER_DMA_DESC_CTRL_RESET);
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);

	return 0;
}
1047
/* Detach a descriptor ring from the device and free its memory.  The
 * ring address register is zeroed first so the device cannot touch the
 * memory after it is released.
 */
static void rocker_dma_ring_destroy(const struct rocker *rocker,
				    const struct rocker_dma_ring_info *info)
{
	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);

	pci_free_consistent(rocker->pdev,
			    info->size * sizeof(struct rocker_desc),
			    info->desc, info->mapaddr);
	kfree(info->desc_info);
}
1058
/* Hand an entire (freshly created) ring to the device as producer. */
static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
					     struct rocker_dma_ring_info *info)
{
	int i;

	BUG_ON(info->head || info->tail);

	/* When ring is consumer, we need to advance head for each desc.
	 * That tells hw that the desc is ready to be used by it.
	 */
	for (i = 0; i < info->size - 1; i++)
		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
	/* The last slot stays unpublished (head == tail would mean empty),
	 * but its sizes are committed so it is ready when head wraps.
	 * Note: 'i' intentionally holds size - 1 here.
	 */
	rocker_desc_commit(&info->desc_info[i]);
}
1073
/* Allocate and DMA-map a data buffer of 'buf_size' bytes for every
 * descriptor in the ring, mapped with the given PCI DMA 'direction'.
 * On failure, all buffers mapped so far are unmapped and freed.
 * Returns 0, -ENOMEM, or -EIO (mapping failure).
 */
static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction, size_t buf_size)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;
	int err;

	for (i = 0; i < info->size; i++) {
		struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];
		dma_addr_t dma_handle;
		char *buf;

		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
		if (!buf) {
			err = -ENOMEM;
			goto rollback;
		}

		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
		if (pci_dma_mapping_error(pdev, dma_handle)) {
			kfree(buf);
			err = -EIO;
			goto rollback;
		}

		/* Record CPU-side view and save the handle for unmap. */
		desc_info->data = buf;
		desc_info->data_size = buf_size;
		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);

		/* Device-side view of the same buffer. */
		desc->buf_addr = dma_handle;
		desc->buf_size = buf_size;
	}
	return 0;

rollback:
	/* Undo mappings for descriptors [0, i); entry i itself was
	 * already cleaned up above before jumping here.
	 */
	for (i--; i >= 0; i--) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];

		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
	return err;
}
1120
/* Unmap and free every per-descriptor data buffer in the ring.
 * 'direction' must match the direction used at map time in
 * rocker_dma_ring_bufs_alloc().
 */
static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
				      const struct rocker_dma_ring_info *info,
				      int direction)
{
	struct pci_dev *pdev = rocker->pdev;
	int i;

	for (i = 0; i < info->size; i++) {
		const struct rocker_desc_info *desc_info = &info->desc_info[i];
		struct rocker_desc *desc = &info->desc[i];

		/* Clear the device's view of the buffer before freeing it. */
		desc->buf_addr = 0;
		desc->buf_size = 0;
		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
				 desc_info->data_size, direction);
		kfree(desc_info->data);
	}
}
1139
/* Create the switch-global command and event DMA rings with their
 * buffers, and hand the event ring to the device as producer.  On any
 * failure, everything set up so far is unwound in reverse order.
 */
static int rocker_dma_rings_init(struct rocker *rocker)
{
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
				     ROCKER_DMA_CMD_DEFAULT_SIZE,
				     &rocker->cmd_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create command dma ring\n");
		return err;
	}

	spin_lock_init(&rocker->cmd_ring_lock);

	/* Command buffers are bidirectional: the CPU writes the request
	 * TLVs and the device writes the response into the same buffer.
	 */
	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
		goto err_dma_cmd_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
				     &rocker->event_ring);
	if (err) {
		dev_err(&pdev->dev, "failed to create event dma ring\n");
		goto err_dma_event_ring_create;
	}

	/* Event buffers are only written by the device. */
	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
	if (err) {
		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
		goto err_dma_event_ring_bufs_alloc;
	}
	/* Device produces events; give it the whole ring up front. */
	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
	return 0;

err_dma_event_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
err_dma_event_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
	return err;
}
1188
1189 static void rocker_dma_rings_fini(struct rocker *rocker)
1190 {
1191         rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1192                                   PCI_DMA_BIDIRECTIONAL);
1193         rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1194         rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1195                                   PCI_DMA_BIDIRECTIONAL);
1196         rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1197 }
1198
/* DMA-map an rx skb's data area and describe it to the device via
 * FRAG_ADDR/FRAG_MAX_LEN TLVs in the rx descriptor.  On TLV overflow
 * the mapping is undone and the descriptor's TLV area is reset so hw
 * will skip it.  Returns 0, -EIO (map failure), or -EMSGSIZE.
 */
static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
				      struct rocker_desc_info *desc_info,
				      struct sk_buff *skb, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;

	dma_handle = pci_map_single(pdev, skb->data, buf_len,
				    PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma_handle))
		return -EIO;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
		goto tlv_put_failure;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
		goto tlv_put_failure;
	return 0;

tlv_put_failure:
	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
	/* Zero tlv_size tells hw this descriptor carries no buffer. */
	desc_info->tlv_size = 0;
	return -EMSGSIZE;
}
1222
1223 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
1224 {
1225         return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1226 }
1227
/* Allocate an skb for an rx descriptor, DMA-map it, and stash the skb
 * pointer in the descriptor cookie for retrieval at completion time.
 * Returns 0, -ENOMEM, or an error from rocker_dma_rx_ring_skb_map().
 */
static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
					struct rocker_desc_info *desc_info)
{
	struct net_device *dev = rocker_port->dev;
	struct sk_buff *skb;
	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
	int err;

	/* Ensure that hw will see tlv_size zero in case of an error.
	 * That tells hw to use another descriptor.
	 */
	rocker_desc_cookie_ptr_set(desc_info, NULL);
	desc_info->tlv_size = 0;

	skb = netdev_alloc_skb_ip_align(dev, buf_len);
	if (!skb)
		return -ENOMEM;
	err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
	if (err) {
		dev_kfree_skb_any(skb);
		return err;
	}
	/* Cookie is set only after a successful map, so free paths can
	 * rely on cookie != NULL implying a live mapping.
	 */
	rocker_desc_cookie_ptr_set(desc_info, skb);
	return 0;
}
1253
1254 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1255                                          const struct rocker_tlv **attrs)
1256 {
1257         struct pci_dev *pdev = rocker->pdev;
1258         dma_addr_t dma_handle;
1259         size_t len;
1260
1261         if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1262             !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1263                 return;
1264         dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1265         len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1266         pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1267 }
1268
/* Release an rx descriptor's skb (if any): re-parse the descriptor's
 * TLVs to recover the DMA mapping, unmap it, and free the skb.
 */
static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
					const struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);

	/* NULL cookie means no skb was ever attached (alloc failed). */
	if (!skb)
		return;
	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
	dev_kfree_skb_any(skb);
}
1281
/* Attach an skb to every descriptor in the port's rx ring.  On
 * failure, skbs allocated so far are freed again.
 */
static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
	const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
	const struct rocker *rocker = rocker_port->rocker;
	int i;
	int err;

	for (i = 0; i < rx_ring->size; i++) {
		err = rocker_dma_rx_ring_skb_alloc(rocker_port,
						   &rx_ring->desc_info[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	/* Free skbs for descriptors [0, i); entry i allocated nothing. */
	for (i--; i >= 0; i--)
		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
	return err;
}
1302
1303 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1304 {
1305         const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1306         const struct rocker *rocker = rocker_port->rocker;
1307         int i;
1308
1309         for (i = 0; i < rx_ring->size; i++)
1310                 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1311 }
1312
/* Create the per-port tx and rx DMA rings, their buffers, and the rx
 * skbs, then hand the rx ring to the device as producer.  Unwinds in
 * reverse order on any failure.
 */
static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;
	int err;

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_TX(rocker_port->port_number),
				     ROCKER_DMA_TX_DEFAULT_SIZE,
				     &rocker_port->tx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
		return err;
	}

	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
					 PCI_DMA_TODEVICE,
					 ROCKER_DMA_TX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
		goto err_dma_tx_ring_bufs_alloc;
	}

	err = rocker_dma_ring_create(rocker,
				     ROCKER_DMA_RX(rocker_port->port_number),
				     ROCKER_DMA_RX_DEFAULT_SIZE,
				     &rocker_port->rx_ring);
	if (err) {
		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
		goto err_dma_rx_ring_create;
	}

	/* rx descriptor buffers are bidirectional: the CPU writes the
	 * frag TLVs and the device writes completion info back.
	 */
	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
					 PCI_DMA_BIDIRECTIONAL,
					 ROCKER_DMA_RX_DESC_SIZE);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
		goto err_dma_rx_ring_bufs_alloc;
	}

	err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
		goto err_dma_rx_ring_skbs_alloc;
	}
	/* Device produces rx completions; give it the whole rx ring. */
	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);

	return 0;

err_dma_rx_ring_skbs_alloc:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
err_dma_tx_ring_bufs_alloc:
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
	return err;
}
1373
/* Tear down the per-port rx and tx DMA rings created by
 * rocker_port_dma_rings_init(), in reverse order; unmap directions
 * match those used at allocation time.
 */
static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
	struct rocker *rocker = rocker_port->rocker;

	rocker_dma_rx_ring_skbs_free(rocker_port);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
				  PCI_DMA_BIDIRECTIONAL);
	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
				  PCI_DMA_TODEVICE);
	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
1386
1387 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1388                                    bool enable)
1389 {
1390         u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1391
1392         if (enable)
1393                 val |= 1ULL << rocker_port->pport;
1394         else
1395                 val &= ~(1ULL << rocker_port->pport);
1396         rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1397 }
1398
1399 /********************************
1400  * Interrupt handler and helpers
1401  ********************************/
1402
/* Command-ring completion IRQ: reap finished command descriptors.  A
 * waiting (synchronous) caller is woken; a nowait command's wait
 * object is freed here and its descriptor recycled immediately.  The
 * reaped descriptor count is returned to hw as credits.
 */
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	u32 credits = 0;

	spin_lock(&rocker->cmd_ring_lock);
	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
		wait = rocker_desc_cookie_ptr_get(desc_info);
		if (wait->nowait) {
			/* No one is waiting: recycle desc and free wait. */
			rocker_desc_gen_clear(desc_info);
			rocker_wait_destroy(NULL, wait);
		} else {
			/* Synchronous caller cleans up in rocker_cmd_exec(). */
			rocker_wait_wake_up(wait);
		}
		credits++;
	}
	spin_unlock(&rocker->cmd_ring_lock);
	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);

	return IRQ_HANDLED;
}
1426
/* Mark the netdev carrier up and log the link-state change. */
static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
	netif_carrier_on(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is up\n");
}
1432
/* Mark the netdev carrier down and log the link-state change. */
static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
	netif_carrier_off(rocker_port->dev);
	netdev_info(rocker_port->dev, "Link is down\n");
}
1438
/* Handle a LINK_CHANGED event from the device: parse the nested TLVs
 * and update the affected port's carrier state if it changed.
 * Returns 0, -EIO on malformed event, or -EINVAL on bad port number.
 */
static int rocker_event_link_change(const struct rocker *rocker,
				    const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
	unsigned int port_number;
	bool link_up;
	struct rocker_port *rocker_port;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
		return -EIO;
	/* Device pport is 1-based; ports[] is 0-based. */
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];
	/* Only touch carrier state on an actual transition. */
	if (netif_carrier_ok(rocker_port->dev) != link_up) {
		if (link_up)
			rocker_port_link_up(rocker_port);
		else
			rocker_port_link_down(rocker_port);
	}

	return 0;
}
1468
1469 static int rocker_port_fdb(struct rocker_port *rocker_port,
1470                            struct switchdev_trans *trans,
1471                            const unsigned char *addr,
1472                            __be16 vlan_id, int flags);
1473
/* Handle a MAC_VLAN_SEEN event (hardware learned a MAC/VLAN on a
 * port): record it in the FDB with the LEARNED flag, skipping ports
 * whose STP state does not allow learning.  Runs from the event IRQ
 * handler, hence the NOWAIT flag on the FDB operation.
 */
static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
				      const struct rocker_tlv *info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
	unsigned int port_number;
	struct rocker_port *rocker_port;
	const unsigned char *addr;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
	__be16 vlan_id;

	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
		return -EIO;
	/* Device pport is 1-based; ports[] is 0-based. */
	port_number =
		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);

	if (port_number >= rocker->port_count)
		return -EINVAL;

	rocker_port = rocker->ports[port_number];

	/* Don't learn on ports blocked/disabled by STP. */
	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		return 0;

	return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
}
1505
1506 static int rocker_event_process(const struct rocker *rocker,
1507                                 const struct rocker_desc_info *desc_info)
1508 {
1509         const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1510         const struct rocker_tlv *info;
1511         u16 type;
1512
1513         rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1514         if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1515             !attrs[ROCKER_TLV_EVENT_INFO])
1516                 return -EIO;
1517
1518         type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1519         info = attrs[ROCKER_TLV_EVENT_INFO];
1520
1521         switch (type) {
1522         case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1523                 return rocker_event_link_change(rocker, info);
1524         case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1525                 return rocker_event_mac_vlan_seen(rocker, info);
1526         }
1527
1528         return -EOPNOTSUPP;
1529 }
1530
/* Event-ring IRQ: drain completed event descriptors, process each one,
 * and immediately hand the descriptor back to the device (the device
 * is the producer on this ring).  Processing errors are logged but do
 * not stop the drain.  Reaped count is returned to hw as credits.
 */
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
	struct rocker *rocker = dev_id;
	const struct pci_dev *pdev = rocker->pdev;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			dev_err(&pdev->dev, "event desc received with err %d\n",
				err);
		} else {
			err = rocker_event_process(rocker, desc_info);
			if (err)
				dev_err(&pdev->dev, "event processing failed with err %d\n",
					err);
		}
		/* Recycle the descriptor back to the device. */
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
		credits++;
	}
	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);

	return IRQ_HANDLED;
}
1558
1559 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1560 {
1561         struct rocker_port *rocker_port = dev_id;
1562
1563         napi_schedule(&rocker_port->napi_tx);
1564         return IRQ_HANDLED;
1565 }
1566
1567 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1568 {
1569         struct rocker_port *rocker_port = dev_id;
1570
1571         napi_schedule(&rocker_port->napi_rx);
1572         return IRQ_HANDLED;
1573 }
1574
1575 /********************
1576  * Command interface
1577  ********************/
1578
/* Callback that fills a command descriptor's TLVs before the command
 * is handed to the device; called under cmd_ring_lock.
 */
typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv);

/* Callback that parses the device's response TLVs after a command
 * completes.
 */
typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
				    const struct rocker_desc_info *desc_info,
				    void *priv);
1586
/* Execute one command against the device: claim a cmd-ring descriptor,
 * let 'prepare' fill it, publish it to hw, and (unless NOWAIT) wait up
 * to HZ/10 for completion, then let 'process' parse the response.
 *
 * With ROCKER_OP_FLAG_NOWAIT set, returns 0 right after publishing;
 * the cmd IRQ handler frees the wait object.  During a switchdev
 * prepare phase the descriptor is prepared but never handed to hw.
 */
static int rocker_cmd_exec(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans, int flags,
			   rocker_cmd_prep_cb_t prepare, void *prepare_priv,
			   rocker_cmd_proc_cb_t process, void *process_priv)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_wait *wait;
	bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
	unsigned long lock_flags;
	int err;

	wait = rocker_wait_create(rocker_port, trans, flags);
	if (!wait)
		return -ENOMEM;
	wait->nowait = nowait;

	spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);

	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
	if (!desc_info) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		err = -EAGAIN;
		goto out;
	}

	err = prepare(rocker_port, desc_info, prepare_priv);
	if (err) {
		spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
		goto out;
	}

	/* IRQ handler finds the wait object through the desc cookie. */
	rocker_desc_cookie_ptr_set(desc_info, wait);

	if (!switchdev_trans_ph_prepare(trans))
		rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);

	spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);

	if (nowait)
		return 0;

	/* NOTE(review): on timeout the wait object is not destroyed
	 * here; the descriptor cookie still points at it and the IRQ
	 * handler may complete it later — confirm this leak/lifetime
	 * is intentional before changing it.
	 */
	if (!switchdev_trans_ph_prepare(trans))
		if (!rocker_wait_event_timeout(wait, HZ / 10))
			return -EIO;

	err = rocker_desc_err(desc_info);
	if (err)
		return err;

	if (process)
		err = process(rocker_port, desc_info, process_priv);

	rocker_desc_gen_clear(desc_info);
out:
	rocker_wait_destroy(trans, wait);
	return err;
}
1645
/* Build a GET_PORT_SETTINGS command: command type TLV plus a nested
 * CMD_INFO holding the physical port number.  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
				  struct rocker_desc_info *desc_info,
				  void *priv)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}
1665
/* Parse a GET_PORT_SETTINGS response into the ethtool_cmd passed via
 * 'priv': speed, duplex and autoneg from the nested CMD_INFO TLVs.
 * Returns 0 or -EIO on a malformed response.
 */
static int
rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	struct ethtool_cmd *ecmd = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	u32 speed;
	u8 duplex;
	u8 autoneg;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
		return -EIO;

	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

	/* Fixed fields: emulated device presents as an internal TP port. */
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_TP;
	ecmd->phy_address = 0xff;
	ecmd->port = PORT_TP;
	ethtool_cmd_speed_set(ecmd, speed);
	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	return 0;
}
1703
/* Completion handler for GET_PORT_SETTINGS when fetching the port's MAC
 * address: copy the ETH_ALEN-byte MACADDR attribute into @priv.
 *
 * Returns 0 on success, -EIO if the expected attributes are missing, or
 * -EINVAL if the MACADDR attribute is not exactly ETH_ALEN bytes.
 */
static int
rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
					  const struct rocker_desc_info *desc_info,
					  void *priv)
{
	unsigned char *macaddr = priv;
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
	const struct rocker_tlv *attr;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);
	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
	if (!attr)
		return -EIO;

	/* reject a malformed response rather than copying a short/long MAC */
	if (rocker_tlv_len(attr) != ETH_ALEN)
		return -EINVAL;

	ether_addr_copy(macaddr, rocker_tlv_data(attr));
	return 0;
}
1730
/* Destination buffer for a port's physical-port name, passed as the
 * @priv argument to rocker_cmd_get_port_settings_phys_name_proc().
 */
struct port_name {
	char *buf;	/* caller-provided output buffer */
	size_t len;	/* size of @buf in bytes */
};
1735
1736 static int
1737 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1738                                             const struct rocker_desc_info *desc_info,
1739                                             void *priv)
1740 {
1741         const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1742         const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1743         struct port_name *name = priv;
1744         const struct rocker_tlv *attr;
1745         size_t i, j, len;
1746         const char *str;
1747
1748         rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1749         if (!attrs[ROCKER_TLV_CMD_INFO])
1750                 return -EIO;
1751
1752         rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1753                                 attrs[ROCKER_TLV_CMD_INFO]);
1754         attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1755         if (!attr)
1756                 return -EIO;
1757
1758         len = min_t(size_t, rocker_tlv_len(attr), name->len);
1759         str = rocker_tlv_data(attr);
1760
1761         /* make sure name only contains alphanumeric characters */
1762         for (i = j = 0; i < len; ++i) {
1763                 if (isalnum(str[i])) {
1764                         name->buf[j] = str[i];
1765                         j++;
1766                 }
1767         }
1768
1769         if (j == 0)
1770                 return -EIO;
1771
1772         name->buf[j] = '\0';
1773
1774         return 0;
1775 }
1776
1777 static int
1778 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1779                                           struct rocker_desc_info *desc_info,
1780                                           void *priv)
1781 {
1782         struct ethtool_cmd *ecmd = priv;
1783         struct rocker_tlv *cmd_info;
1784
1785         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1786                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1787                 return -EMSGSIZE;
1788         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1789         if (!cmd_info)
1790                 return -EMSGSIZE;
1791         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1792                                rocker_port->pport))
1793                 return -EMSGSIZE;
1794         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1795                                ethtool_cmd_speed(ecmd)))
1796                 return -EMSGSIZE;
1797         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1798                               ecmd->duplex))
1799                 return -EMSGSIZE;
1800         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1801                               ecmd->autoneg))
1802                 return -EMSGSIZE;
1803         rocker_tlv_nest_end(desc_info, cmd_info);
1804         return 0;
1805 }
1806
1807 static int
1808 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1809                                           struct rocker_desc_info *desc_info,
1810                                           void *priv)
1811 {
1812         const unsigned char *macaddr = priv;
1813         struct rocker_tlv *cmd_info;
1814
1815         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1816                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1817                 return -EMSGSIZE;
1818         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1819         if (!cmd_info)
1820                 return -EMSGSIZE;
1821         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1822                                rocker_port->pport))
1823                 return -EMSGSIZE;
1824         if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1825                            ETH_ALEN, macaddr))
1826                 return -EMSGSIZE;
1827         rocker_tlv_nest_end(desc_info, cmd_info);
1828         return 0;
1829 }
1830
1831 static int
1832 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1833                                       struct rocker_desc_info *desc_info,
1834                                       void *priv)
1835 {
1836         int mtu = *(int *)priv;
1837         struct rocker_tlv *cmd_info;
1838
1839         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1840                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1841                 return -EMSGSIZE;
1842         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1843         if (!cmd_info)
1844                 return -EMSGSIZE;
1845         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1846                                rocker_port->pport))
1847                 return -EMSGSIZE;
1848         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1849                                mtu))
1850                 return -EMSGSIZE;
1851         rocker_tlv_nest_end(desc_info, cmd_info);
1852         return 0;
1853 }
1854
1855 static int
1856 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1857                                   struct rocker_desc_info *desc_info,
1858                                   void *priv)
1859 {
1860         struct rocker_tlv *cmd_info;
1861
1862         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1863                                ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1864                 return -EMSGSIZE;
1865         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1866         if (!cmd_info)
1867                 return -EMSGSIZE;
1868         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1869                                rocker_port->pport))
1870                 return -EMSGSIZE;
1871         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1872                               !!(rocker_port->brport_flags & BR_LEARNING)))
1873                 return -EMSGSIZE;
1874         rocker_tlv_nest_end(desc_info, cmd_info);
1875         return 0;
1876 }
1877
/* Query the device for this port's link settings (speed/duplex/autoneg)
 * via GET_PORT_SETTINGS, filling in @ecmd.  Returns 0 or a negative
 * errno from command execution/parsing.
 */
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_ethtool_proc,
			       ecmd);
}
1886
/* Query the device for this port's MAC address via GET_PORT_SETTINGS;
 * @macaddr must have room for ETH_ALEN bytes.  Returns 0 or a negative
 * errno from command execution/parsing.
 */
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_settings_prep, NULL,
			       rocker_cmd_get_port_settings_macaddr_proc,
			       macaddr);
}
1895
/* Program new link settings from @ecmd onto this port via
 * SET_PORT_SETTINGS; no completion data is parsed.  Returns 0 or a
 * negative errno from command execution.
 */
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
						struct ethtool_cmd *ecmd)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_ethtool_prep,
			       ecmd, NULL, NULL);
}
1903
/* Program a new MAC address (@macaddr, ETH_ALEN bytes) onto this port
 * via SET_PORT_SETTINGS.  Returns 0 or a negative errno.
 */
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
						unsigned char *macaddr)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_macaddr_prep,
			       macaddr, NULL, NULL);
}
1911
/* Program a new MTU onto this port via SET_PORT_SETTINGS.  @mtu is
 * passed by address because the prep callback takes a generic void *.
 * Returns 0 or a negative errno.
 */
static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
					    int mtu)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_set_port_settings_mtu_prep,
			       &mtu, NULL, NULL);
}
1919
/* Push the port's current BR_LEARNING bridge flag down to the device.
 * @trans carries the switchdev transaction phase (prepare/commit).
 * Returns 0 or a negative errno.
 */
static int rocker_port_set_learning(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans)
{
	return rocker_cmd_exec(rocker_port, trans, 0,
			       rocker_cmd_set_port_learning_prep,
			       NULL, NULL, NULL);
}
1927
1928 static int
1929 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1930                                 const struct rocker_flow_tbl_entry *entry)
1931 {
1932         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1933                                entry->key.ig_port.in_pport))
1934                 return -EMSGSIZE;
1935         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1936                                entry->key.ig_port.in_pport_mask))
1937                 return -EMSGSIZE;
1938         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1939                                entry->key.ig_port.goto_tbl))
1940                 return -EMSGSIZE;
1941
1942         return 0;
1943 }
1944
1945 static int
1946 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1947                              const struct rocker_flow_tbl_entry *entry)
1948 {
1949         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1950                                entry->key.vlan.in_pport))
1951                 return -EMSGSIZE;
1952         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1953                                 entry->key.vlan.vlan_id))
1954                 return -EMSGSIZE;
1955         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1956                                 entry->key.vlan.vlan_id_mask))
1957                 return -EMSGSIZE;
1958         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1959                                entry->key.vlan.goto_tbl))
1960                 return -EMSGSIZE;
1961         if (entry->key.vlan.untagged &&
1962             rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1963                                 entry->key.vlan.new_vlan_id))
1964                 return -EMSGSIZE;
1965
1966         return 0;
1967 }
1968
/* Emit the termination-MAC table match/action TLVs: masked in_pport,
 * ethertype, masked destination MAC and masked VLAN id, plus the goto
 * table and an optional copy-to-CPU action.  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
				 const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.term_mac.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.term_mac.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.term_mac.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.term_mac.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.term_mac.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.term_mac.vlan_id_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.term_mac.goto_tbl))
		return -EMSGSIZE;
	/* the copy-to-CPU action attribute is only emitted when enabled */
	if (entry->key.term_mac.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.term_mac.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
2004
2005 static int
2006 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
2007                                       const struct rocker_flow_tbl_entry *entry)
2008 {
2009         if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2010                                 entry->key.ucast_routing.eth_type))
2011                 return -EMSGSIZE;
2012         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2013                                 entry->key.ucast_routing.dst4))
2014                 return -EMSGSIZE;
2015         if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2016                                 entry->key.ucast_routing.dst4_mask))
2017                 return -EMSGSIZE;
2018         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2019                                entry->key.ucast_routing.goto_tbl))
2020                 return -EMSGSIZE;
2021         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2022                                entry->key.ucast_routing.group_id))
2023                 return -EMSGSIZE;
2024
2025         return 0;
2026 }
2027
/* Emit the bridging table match/action TLVs.  The destination MAC,
 * its mask, the VLAN id and the tunnel id are all optional matches and
 * are only emitted when set; the goto table and group id are always
 * emitted, and copy-to-CPU is an optional action.  Returns 0 or
 * -EMSGSIZE.
 */
static int
rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
			       const struct rocker_flow_tbl_entry *entry)
{
	if (entry->key.bridge.has_eth_dst &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.bridge.eth_dst))
		return -EMSGSIZE;
	if (entry->key.bridge.has_eth_dst_mask &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
		return -EMSGSIZE;
	/* vlan_id/tunnel_id of 0 mean "no match on this field" here */
	if (entry->key.bridge.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.bridge.vlan_id))
		return -EMSGSIZE;
	if (entry->key.bridge.tunnel_id &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
			       entry->key.bridge.tunnel_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
			       entry->key.bridge.goto_tbl))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.bridge.group_id))
		return -EMSGSIZE;
	if (entry->key.bridge.copy_to_cpu &&
	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
			      entry->key.bridge.copy_to_cpu))
		return -EMSGSIZE;

	return 0;
}
2061
/* Emit the ACL policy table match/action TLVs: masked in_pport, masked
 * source and destination MACs, ethertype and masked VLAN id.  For IP
 * traffic, also emit masked IP protocol and the TOS byte split into
 * DSCP and ECN attributes.  The output group is emitted unless it is
 * ROCKER_GROUP_NONE.  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
			    const struct rocker_flow_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
			       entry->key.acl.in_pport))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
			       entry->key.acl.in_pport_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->key.acl.eth_src))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_src_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->key.acl.eth_dst))
		return -EMSGSIZE;
	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
			   ETH_ALEN, entry->key.acl.eth_dst_mask))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
				entry->key.acl.eth_type))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->key.acl.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
				entry->key.acl.vlan_id_mask))
		return -EMSGSIZE;

	switch (ntohs(entry->key.acl.eth_type)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		/* IP-only matches: protocol and TOS, each with a mask */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
				      entry->key.acl.ip_proto))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
				      entry->key.acl.ip_proto_mask))
			return -EMSGSIZE;
		/* NOTE(review): DSCP is taken from the low 6 bits of
		 * ip_tos and ECN from the top 2 — the inverse of the
		 * RFC 2474 on-the-wire layout.  Presumably this matches
		 * how the rocker device model packs the field; confirm
		 * against the rocker hardware/device spec.
		 */
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
				      entry->key.acl.ip_tos & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
				      entry->key.acl.ip_tos_mask & 0x3f))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
				      (entry->key.acl.ip_tos & 0xc0) >> 6))
			return -EMSGSIZE;
		if (rocker_tlv_put_u8(desc_info,
				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
			return -EMSGSIZE;
		break;
	}

	/* ROCKER_GROUP_NONE means "no forwarding action" (e.g. drop) */
	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->key.acl.group_id))
		return -EMSGSIZE;

	return 0;
}
2128
/* Prepare an OF-DPA flow-table add/modify command descriptor.
 *
 * Emits the attributes common to every flow (table id, priority,
 * hard-timeout, cookie) and then dispatches to the per-table helper
 * that emits the table-specific match/action TLVs.  @priv is the
 * rocker_flow_tbl_entry to encode.
 *
 * Returns 0 on success, -EMSGSIZE if the descriptor buffer is too
 * small, or -ENOTSUPP for an unknown table id.
 */
static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
				   struct rocker_desc_info *desc_info,
				   void *priv)
{
	const struct rocker_flow_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
			       entry->key.tbl_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
			       entry->key.priority))
		return -EMSGSIZE;
	/* hard-timeout is always 0: flows persist until explicitly deleted */
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
			       entry->cookie))
		return -EMSGSIZE;

	switch (entry->key.tbl_id) {
	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_VLAN:
		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
		break;
	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	/* close the CMD_INFO nest only after all attributes are in */
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
2185
2186 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2187                                    struct rocker_desc_info *desc_info,
2188                                    void *priv)
2189 {
2190         const struct rocker_flow_tbl_entry *entry = priv;
2191         struct rocker_tlv *cmd_info;
2192
2193         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2194                 return -EMSGSIZE;
2195         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2196         if (!cmd_info)
2197                 return -EMSGSIZE;
2198         if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2199                                entry->cookie))
2200                 return -EMSGSIZE;
2201         rocker_tlv_nest_end(desc_info, cmd_info);
2202
2203         return 0;
2204 }
2205
2206 static int
2207 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2208                                       struct rocker_group_tbl_entry *entry)
2209 {
2210         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2211                                ROCKER_GROUP_PORT_GET(entry->group_id)))
2212                 return -EMSGSIZE;
2213         if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2214                               entry->l2_interface.pop_vlan))
2215                 return -EMSGSIZE;
2216
2217         return 0;
2218 }
2219
/* Emit the L2-rewrite group TLVs: the next (lower-level) group to chain
 * to, and optional source MAC, destination MAC and VLAN id rewrites —
 * each rewrite attribute is only emitted when its value is non-zero.
 * Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l2_rewrite.group_id))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l2_rewrite.eth_dst))
		return -EMSGSIZE;
	if (entry->l2_rewrite.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l2_rewrite.vlan_id))
		return -EMSGSIZE;

	return 0;
}
2242
2243 static int
2244 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2245                                    const struct rocker_group_tbl_entry *entry)
2246 {
2247         int i;
2248         struct rocker_tlv *group_ids;
2249
2250         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2251                                entry->group_count))
2252                 return -EMSGSIZE;
2253
2254         group_ids = rocker_tlv_nest_start(desc_info,
2255                                           ROCKER_TLV_OF_DPA_GROUP_IDS);
2256         if (!group_ids)
2257                 return -EMSGSIZE;
2258
2259         for (i = 0; i < entry->group_count; i++)
2260                 /* Note TLV array is 1-based */
2261                 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2262                         return -EMSGSIZE;
2263
2264         rocker_tlv_nest_end(desc_info, group_ids);
2265
2266         return 0;
2267 }
2268
/* Emit the L3-unicast group TLVs: optional source/destination MAC and
 * VLAN id rewrites (emitted only when non-zero), the TTL-check flag,
 * and the lower-level (L2) group to chain to.  Returns 0 or -EMSGSIZE.
 */
static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
				    const struct rocker_group_tbl_entry *entry)
{
	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_src))
		return -EMSGSIZE;
	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
			   ETH_ALEN, entry->l3_unicast.eth_dst))
		return -EMSGSIZE;
	if (entry->l3_unicast.vlan_id &&
	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
				entry->l3_unicast.vlan_id))
		return -EMSGSIZE;
	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
			      entry->l3_unicast.ttl_check))
		return -EMSGSIZE;
	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
			       entry->l3_unicast.group_id))
		return -EMSGSIZE;

	return 0;
}
2294
/* Prepare an OF-DPA group-table add/modify command descriptor.
 *
 * Emits the group id and then dispatches, on the type field encoded in
 * the group id, to the helper that emits the type-specific TLVs.
 * @priv is the rocker_group_tbl_entry to encode.
 *
 * Returns 0 on success, -EMSGSIZE if the descriptor buffer is too
 * small, or -ENOTSUPP for an unknown group type.
 */
static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
				    struct rocker_desc_info *desc_info,
				    void *priv)
{
	struct rocker_group_tbl_entry *entry = priv;
	struct rocker_tlv *cmd_info;
	int err = 0;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
			       entry->group_id))
		return -EMSGSIZE;

	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
		/* flood and mcast groups share the member-id-list layout */
		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
		break;
	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	if (err)
		return err;

	/* close the CMD_INFO nest only after all attributes are in */
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}
2339
2340 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2341                                     struct rocker_desc_info *desc_info,
2342                                     void *priv)
2343 {
2344         const struct rocker_group_tbl_entry *entry = priv;
2345         struct rocker_tlv *cmd_info;
2346
2347         if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2348                 return -EMSGSIZE;
2349         cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2350         if (!cmd_info)
2351                 return -EMSGSIZE;
2352         if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2353                                entry->group_id))
2354                 return -EMSGSIZE;
2355         rocker_tlv_nest_end(desc_info, cmd_info);
2356
2357         return 0;
2358 }
2359
2360 /***************************************************
2361  * Flow, group, FDB, internal VLAN and neigh tables
2362  ***************************************************/
2363
/* Initialize the driver's software shadow tables (flow, group, FDB,
 * internal VLAN, neighbour) and the spinlock protecting each one.
 * Always returns 0; the int return keeps the probe-path call site
 * uniform with other init steps.
 */
static int rocker_init_tbls(struct rocker *rocker)
{
	hash_init(rocker->flow_tbl);
	spin_lock_init(&rocker->flow_tbl_lock);

	hash_init(rocker->group_tbl);
	spin_lock_init(&rocker->group_tbl_lock);

	hash_init(rocker->fdb_tbl);
	spin_lock_init(&rocker->fdb_tbl_lock);

	hash_init(rocker->internal_vlan_tbl);
	spin_lock_init(&rocker->internal_vlan_tbl_lock);

	hash_init(rocker->neigh_tbl);
	spin_lock_init(&rocker->neigh_tbl_lock);

	return 0;
}
2383
/* Empty all software shadow tables at teardown.  Each hash table is
 * walked with the _safe iterator (entries are unlinked during the walk)
 * under its own spinlock.
 *
 * NOTE(review): entries are only unlinked here, not kfree'd — confirm
 * the entries' memory is owned/released elsewhere on the remove path.
 */
static void rocker_free_tbls(struct rocker *rocker)
{
	unsigned long flags;
	struct rocker_flow_tbl_entry *flow_entry;
	struct rocker_group_tbl_entry *group_entry;
	struct rocker_fdb_tbl_entry *fdb_entry;
	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
	struct rocker_neigh_tbl_entry *neigh_entry;
	struct hlist_node *tmp;
	int bkt;

	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
		hash_del(&flow_entry->entry);
	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);

	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
		hash_del(&group_entry->entry);
	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);

	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
		hash_del(&fdb_entry->entry);
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
			   tmp, internal_vlan_entry, entry)
		hash_del(&internal_vlan_entry->entry);
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);

	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
		hash_del(&neigh_entry->entry);
	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
}
2421
2422 static struct rocker_flow_tbl_entry *
2423 rocker_flow_tbl_find(const struct rocker *rocker,
2424                      const struct rocker_flow_tbl_entry *match)
2425 {
2426         struct rocker_flow_tbl_entry *found;
2427         size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2428
2429         hash_for_each_possible(rocker->flow_tbl, found,
2430                                entry, match->key_crc32) {
2431                 if (memcmp(&found->key, &match->key, key_len) == 0)
2432                         return found;
2433         }
2434
2435         return NULL;
2436 }
2437
/* Add (or modify) a flow-table entry.  Takes ownership of @match: it is
 * either inserted into the table or replaces an existing entry, and is
 * then pushed to the device via rocker_cmd_exec().  During the switchdev
 * prepare phase the hash table is left untouched so the operation can be
 * aborted without side effects.
 */
static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans, int flags,
                               struct rocker_flow_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
        /* key_len == 0 means "compare/hash the whole key" */
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

        found = rocker_flow_tbl_find(rocker, match);

        if (found) {
                /* Existing flow: keep its cookie and issue a MOD, replacing
                 * the old entry with @match in the table.
                 */
                match->cookie = found->cookie;
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                rocker_port_kfree(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
        } else {
                /* New flow: allocate the next cookie and issue an ADD. */
                found = match;
                found->cookie = rocker->flow_tbl_next_cookie++;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
        }

        if (!switchdev_trans_ph_prepare(trans))
                hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);

        spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

        return rocker_cmd_exec(rocker_port, trans, flags,
                               rocker_cmd_flow_tbl_add, found, NULL, NULL);
}
2474
/* Delete the flow-table entry matching @match.  @match itself is only a
 * key carrier and is always freed; if a matching entry is found it is
 * unlinked (outside the prepare phase), a DEL command is sent to the
 * device, and the found entry is freed.  Returns 0 if no entry matched.
 */
static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans, int flags,
                               struct rocker_flow_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_flow_tbl_entry *found;
        /* key_len == 0 means "compare/hash the whole key" */
        size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
        unsigned long lock_flags;
        int err = 0;

        match->key_crc32 = crc32(~0, &match->key, key_len);

        spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);

        found = rocker_flow_tbl_find(rocker, match);

        if (found) {
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
        }

        spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);

        /* The lookup key is no longer needed regardless of outcome. */
        rocker_port_kfree(trans, match);

        if (found) {
                err = rocker_cmd_exec(rocker_port, trans, flags,
                                      rocker_cmd_flow_tbl_del,
                                      found, NULL, NULL);
                rocker_port_kfree(trans, found);
        }

        return err;
}
2510
2511 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2512                               struct switchdev_trans *trans, int flags,
2513                               struct rocker_flow_tbl_entry *entry)
2514 {
2515         if (flags & ROCKER_OP_FLAG_REMOVE)
2516                 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2517         else
2518                 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
2519 }
2520
2521 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2522                                    struct switchdev_trans *trans, int flags,
2523                                    u32 in_pport, u32 in_pport_mask,
2524                                    enum rocker_of_dpa_table_id goto_tbl)
2525 {
2526         struct rocker_flow_tbl_entry *entry;
2527
2528         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2529         if (!entry)
2530                 return -ENOMEM;
2531
2532         entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2533         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2534         entry->key.ig_port.in_pport = in_pport;
2535         entry->key.ig_port.in_pport_mask = in_pport_mask;
2536         entry->key.ig_port.goto_tbl = goto_tbl;
2537
2538         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2539 }
2540
2541 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2542                                 struct switchdev_trans *trans, int flags,
2543                                 u32 in_pport, __be16 vlan_id,
2544                                 __be16 vlan_id_mask,
2545                                 enum rocker_of_dpa_table_id goto_tbl,
2546                                 bool untagged, __be16 new_vlan_id)
2547 {
2548         struct rocker_flow_tbl_entry *entry;
2549
2550         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2551         if (!entry)
2552                 return -ENOMEM;
2553
2554         entry->key.priority = ROCKER_PRIORITY_VLAN;
2555         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2556         entry->key.vlan.in_pport = in_pport;
2557         entry->key.vlan.vlan_id = vlan_id;
2558         entry->key.vlan.vlan_id_mask = vlan_id_mask;
2559         entry->key.vlan.goto_tbl = goto_tbl;
2560
2561         entry->key.vlan.untagged = untagged;
2562         entry->key.vlan.new_vlan_id = new_vlan_id;
2563
2564         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2565 }
2566
2567 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2568                                     struct switchdev_trans *trans,
2569                                     u32 in_pport, u32 in_pport_mask,
2570                                     __be16 eth_type, const u8 *eth_dst,
2571                                     const u8 *eth_dst_mask, __be16 vlan_id,
2572                                     __be16 vlan_id_mask, bool copy_to_cpu,
2573                                     int flags)
2574 {
2575         struct rocker_flow_tbl_entry *entry;
2576
2577         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2578         if (!entry)
2579                 return -ENOMEM;
2580
2581         if (is_multicast_ether_addr(eth_dst)) {
2582                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2583                 entry->key.term_mac.goto_tbl =
2584                          ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2585         } else {
2586                 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2587                 entry->key.term_mac.goto_tbl =
2588                          ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2589         }
2590
2591         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2592         entry->key.term_mac.in_pport = in_pport;
2593         entry->key.term_mac.in_pport_mask = in_pport_mask;
2594         entry->key.term_mac.eth_type = eth_type;
2595         ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2596         ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2597         entry->key.term_mac.vlan_id = vlan_id;
2598         entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2599         entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2600
2601         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2602 }
2603
2604 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2605                                   struct switchdev_trans *trans, int flags,
2606                                   const u8 *eth_dst, const u8 *eth_dst_mask,
2607                                   __be16 vlan_id, u32 tunnel_id,
2608                                   enum rocker_of_dpa_table_id goto_tbl,
2609                                   u32 group_id, bool copy_to_cpu)
2610 {
2611         struct rocker_flow_tbl_entry *entry;
2612         u32 priority;
2613         bool vlan_bridging = !!vlan_id;
2614         bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2615         bool wild = false;
2616
2617         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2618         if (!entry)
2619                 return -ENOMEM;
2620
2621         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2622
2623         if (eth_dst) {
2624                 entry->key.bridge.has_eth_dst = 1;
2625                 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2626         }
2627         if (eth_dst_mask) {
2628                 entry->key.bridge.has_eth_dst_mask = 1;
2629                 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2630                 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2631                         wild = true;
2632         }
2633
2634         priority = ROCKER_PRIORITY_UNKNOWN;
2635         if (vlan_bridging && dflt && wild)
2636                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2637         else if (vlan_bridging && dflt && !wild)
2638                 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2639         else if (vlan_bridging && !dflt)
2640                 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2641         else if (!vlan_bridging && dflt && wild)
2642                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2643         else if (!vlan_bridging && dflt && !wild)
2644                 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2645         else if (!vlan_bridging && !dflt)
2646                 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2647
2648         entry->key.priority = priority;
2649         entry->key.bridge.vlan_id = vlan_id;
2650         entry->key.bridge.tunnel_id = tunnel_id;
2651         entry->key.bridge.goto_tbl = goto_tbl;
2652         entry->key.bridge.group_id = group_id;
2653         entry->key.bridge.copy_to_cpu = copy_to_cpu;
2654
2655         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2656 }
2657
2658 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2659                                           struct switchdev_trans *trans,
2660                                           __be16 eth_type, __be32 dst,
2661                                           __be32 dst_mask, u32 priority,
2662                                           enum rocker_of_dpa_table_id goto_tbl,
2663                                           u32 group_id, int flags)
2664 {
2665         struct rocker_flow_tbl_entry *entry;
2666
2667         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2668         if (!entry)
2669                 return -ENOMEM;
2670
2671         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2672         entry->key.priority = priority;
2673         entry->key.ucast_routing.eth_type = eth_type;
2674         entry->key.ucast_routing.dst4 = dst;
2675         entry->key.ucast_routing.dst4_mask = dst_mask;
2676         entry->key.ucast_routing.goto_tbl = goto_tbl;
2677         entry->key.ucast_routing.group_id = group_id;
2678         entry->key_len = offsetof(struct rocker_flow_tbl_key,
2679                                   ucast_routing.group_id);
2680
2681         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2682 }
2683
2684 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2685                                struct switchdev_trans *trans, int flags,
2686                                u32 in_pport, u32 in_pport_mask,
2687                                const u8 *eth_src, const u8 *eth_src_mask,
2688                                const u8 *eth_dst, const u8 *eth_dst_mask,
2689                                __be16 eth_type, __be16 vlan_id,
2690                                __be16 vlan_id_mask, u8 ip_proto,
2691                                u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2692                                u32 group_id)
2693 {
2694         u32 priority;
2695         struct rocker_flow_tbl_entry *entry;
2696
2697         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2698         if (!entry)
2699                 return -ENOMEM;
2700
2701         priority = ROCKER_PRIORITY_ACL_NORMAL;
2702         if (eth_dst && eth_dst_mask) {
2703                 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2704                         priority = ROCKER_PRIORITY_ACL_DFLT;
2705                 else if (is_link_local_ether_addr(eth_dst))
2706                         priority = ROCKER_PRIORITY_ACL_CTRL;
2707         }
2708
2709         entry->key.priority = priority;
2710         entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2711         entry->key.acl.in_pport = in_pport;
2712         entry->key.acl.in_pport_mask = in_pport_mask;
2713
2714         if (eth_src)
2715                 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2716         if (eth_src_mask)
2717                 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2718         if (eth_dst)
2719                 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2720         if (eth_dst_mask)
2721                 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2722
2723         entry->key.acl.eth_type = eth_type;
2724         entry->key.acl.vlan_id = vlan_id;
2725         entry->key.acl.vlan_id_mask = vlan_id_mask;
2726         entry->key.acl.ip_proto = ip_proto;
2727         entry->key.acl.ip_proto_mask = ip_proto_mask;
2728         entry->key.acl.ip_tos = ip_tos;
2729         entry->key.acl.ip_tos_mask = ip_tos_mask;
2730         entry->key.acl.group_id = group_id;
2731
2732         return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2733 }
2734
2735 static struct rocker_group_tbl_entry *
2736 rocker_group_tbl_find(const struct rocker *rocker,
2737                       const struct rocker_group_tbl_entry *match)
2738 {
2739         struct rocker_group_tbl_entry *found;
2740
2741         hash_for_each_possible(rocker->group_tbl, found,
2742                                entry, match->group_id) {
2743                 if (found->group_id == match->group_id)
2744                         return found;
2745         }
2746
2747         return NULL;
2748 }
2749
2750 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2751                                         struct rocker_group_tbl_entry *entry)
2752 {
2753         switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2754         case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2755         case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2756                 rocker_port_kfree(trans, entry->group_ids);
2757                 break;
2758         default:
2759                 break;
2760         }
2761         rocker_port_kfree(trans, entry);
2762 }
2763
/* Add (or modify) a group-table entry.  Takes ownership of @match: it is
 * either inserted or replaces an existing entry (which is freed), then
 * pushed to the device.  During the switchdev prepare phase the hash
 * table is left untouched so the operation can be aborted cleanly.
 */
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
                                struct switchdev_trans *trans, int flags,
                                struct rocker_group_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_group_tbl_entry *found;
        unsigned long lock_flags;

        spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

        found = rocker_group_tbl_find(rocker, match);

        if (found) {
                /* Existing group: replace with @match and issue a MOD. */
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                rocker_group_tbl_entry_free(trans, found);
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
        } else {
                found = match;
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
        }

        if (!switchdev_trans_ph_prepare(trans))
                hash_add(rocker->group_tbl, &found->entry, found->group_id);

        spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

        return rocker_cmd_exec(rocker_port, trans, flags,
                               rocker_cmd_group_tbl_add, found, NULL, NULL);
}
2795
/* Delete the group-table entry matching @match->group_id.  @match is only
 * a key carrier and is always freed; if a matching entry is found it is
 * unlinked (outside the prepare phase), a DEL command is sent to the
 * device, and the found entry is freed.  Returns 0 if no entry matched.
 */
static int rocker_group_tbl_del(struct rocker_port *rocker_port,
                                struct switchdev_trans *trans, int flags,
                                struct rocker_group_tbl_entry *match)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_group_tbl_entry *found;
        unsigned long lock_flags;
        int err = 0;

        spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);

        found = rocker_group_tbl_find(rocker, match);

        if (found) {
                if (!switchdev_trans_ph_prepare(trans))
                        hash_del(&found->entry);
                found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
        }

        spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);

        /* The lookup key is no longer needed regardless of outcome. */
        rocker_group_tbl_entry_free(trans, match);

        if (found) {
                err = rocker_cmd_exec(rocker_port, trans, flags,
                                      rocker_cmd_group_tbl_del,
                                      found, NULL, NULL);
                rocker_group_tbl_entry_free(trans, found);
        }

        return err;
}
2828
2829 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2830                                struct switchdev_trans *trans, int flags,
2831                                struct rocker_group_tbl_entry *entry)
2832 {
2833         if (flags & ROCKER_OP_FLAG_REMOVE)
2834                 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2835         else
2836                 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2837 }
2838
2839 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2840                                      struct switchdev_trans *trans, int flags,
2841                                      __be16 vlan_id, u32 out_pport,
2842                                      int pop_vlan)
2843 {
2844         struct rocker_group_tbl_entry *entry;
2845
2846         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2847         if (!entry)
2848                 return -ENOMEM;
2849
2850         entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2851         entry->l2_interface.pop_vlan = pop_vlan;
2852
2853         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2854 }
2855
2856 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2857                                    struct switchdev_trans *trans,
2858                                    int flags, u8 group_count,
2859                                    const u32 *group_ids, u32 group_id)
2860 {
2861         struct rocker_group_tbl_entry *entry;
2862
2863         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2864         if (!entry)
2865                 return -ENOMEM;
2866
2867         entry->group_id = group_id;
2868         entry->group_count = group_count;
2869
2870         entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
2871                                                group_count, sizeof(u32));
2872         if (!entry->group_ids) {
2873                 rocker_port_kfree(trans, entry);
2874                 return -ENOMEM;
2875         }
2876         memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2877
2878         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2879 }
2880
2881 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2882                                  struct switchdev_trans *trans, int flags,
2883                                  __be16 vlan_id, u8 group_count,
2884                                  const u32 *group_ids, u32 group_id)
2885 {
2886         return rocker_group_l2_fan_out(rocker_port, trans, flags,
2887                                        group_count, group_ids,
2888                                        group_id);
2889 }
2890
2891 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2892                                    struct switchdev_trans *trans, int flags,
2893                                    u32 index, const u8 *src_mac, const u8 *dst_mac,
2894                                    __be16 vlan_id, bool ttl_check, u32 pport)
2895 {
2896         struct rocker_group_tbl_entry *entry;
2897
2898         entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2899         if (!entry)
2900                 return -ENOMEM;
2901
2902         entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2903         if (src_mac)
2904                 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2905         if (dst_mac)
2906                 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2907         entry->l3_unicast.vlan_id = vlan_id;
2908         entry->l3_unicast.ttl_check = ttl_check;
2909         entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2910
2911         return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2912 }
2913
2914 static struct rocker_neigh_tbl_entry *
2915 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2916 {
2917         struct rocker_neigh_tbl_entry *found;
2918
2919         hash_for_each_possible(rocker->neigh_tbl, found,
2920                                entry, be32_to_cpu(ip_addr))
2921                 if (found->ip_addr == ip_addr)
2922                         return found;
2923
2924         return NULL;
2925 }
2926
/* Insert @entry into the neigh table.  Caller must hold neigh_tbl_lock.
 * The hardware index is consumed in both phases except commit (so
 * prepare and non-transactional paths both advance the counter), but the
 * refcount/hash insertion only happens outside the prepare phase.
 */
static void _rocker_neigh_add(struct rocker *rocker,
                              struct switchdev_trans *trans,
                              struct rocker_neigh_tbl_entry *entry)
{
        if (!switchdev_trans_ph_commit(trans))
                entry->index = rocker->neigh_tbl_next_index++;
        if (switchdev_trans_ph_prepare(trans))
                return;
        entry->ref_count++;
        hash_add(rocker->neigh_tbl, &entry->entry,
                 be32_to_cpu(entry->ip_addr));
}
2939
2940 static void _rocker_neigh_del(struct switchdev_trans *trans,
2941                               struct rocker_neigh_tbl_entry *entry)
2942 {
2943         if (switchdev_trans_ph_prepare(trans))
2944                 return;
2945         if (--entry->ref_count == 0) {
2946                 hash_del(&entry->entry);
2947                 rocker_port_kfree(trans, entry);
2948         }
2949 }
2950
2951 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
2952                                  struct switchdev_trans *trans,
2953                                  const u8 *eth_dst, bool ttl_check)
2954 {
2955         if (eth_dst) {
2956                 ether_addr_copy(entry->eth_dst, eth_dst);
2957                 entry->ttl_check = ttl_check;
2958         } else if (!switchdev_trans_ph_prepare(trans)) {
2959                 entry->ref_count++;
2960         }
2961 }
2962
/* Add, update or remove the hardware state for an IPv4 neighbor: an L3
 * unicast group plus a /32 unicast route pointing at it.  @flags with
 * ROCKER_OP_FLAG_REMOVE selects removal; otherwise add-or-update.
 * A scratch entry is always allocated; on add it becomes the table entry
 * (ownership passes to _rocker_neigh_add), on update/remove it holds a
 * snapshot of the found entry used outside the lock, and is freed at the
 * end.  Returns -ENOENT when asked to remove a nonexistent neighbor.
 */
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
                                  struct switchdev_trans *trans,
                                  int flags, __be32 ip_addr, const u8 *eth_dst)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_neigh_tbl_entry *entry;
        struct rocker_neigh_tbl_entry *found;
        unsigned long lock_flags;
        __be16 eth_type = htons(ETH_P_IP);
        enum rocker_of_dpa_table_id goto_tbl =
                ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
        u32 group_id;
        u32 priority = 0;
        bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
        bool updating;
        bool removing;
        int err = 0;

        entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

        found = rocker_neigh_tbl_find(rocker, ip_addr);

        /* Classify the operation by presence of an existing entry. */
        updating = found && adding;
        removing = found && !adding;
        adding = !found && adding;

        if (adding) {
                entry->ip_addr = ip_addr;
                entry->dev = rocker_port->dev;
                ether_addr_copy(entry->eth_dst, eth_dst);
                entry->ttl_check = true;
                _rocker_neigh_add(rocker, trans, entry);
        } else if (removing) {
                /* Snapshot before _rocker_neigh_del may free @found. */
                memcpy(entry, found, sizeof(*entry));
                _rocker_neigh_del(trans, found);
        } else if (updating) {
                _rocker_neigh_update(found, trans, eth_dst, true);
                memcpy(entry, found, sizeof(*entry));
        } else {
                err = -ENOENT;
        }

        spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

        if (err)
                goto err_out;

        /* For each active neighbor, we have an L3 unicast group and
         * a /32 route to the neighbor, which uses the L3 unicast
         * group.  The L3 unicast group can also be referred to by
         * other routes' nexthops.
         */

        err = rocker_group_l3_unicast(rocker_port, trans, flags,
                                      entry->index,
                                      rocker_port->dev->dev_addr,
                                      entry->eth_dst,
                                      rocker_port->internal_vlan_id,
                                      entry->ttl_check,
                                      rocker_port->pport);
        if (err) {
                netdev_err(rocker_port->dev,
                           "Error (%d) L3 unicast group index %d\n",
                           err, entry->index);
                goto err_out;
        }

        /* Updates reuse the existing route; only add/remove touch it. */
        if (adding || removing) {
                group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
                err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
                                                     eth_type, ip_addr,
                                                     inet_make_mask(32),
                                                     priority, goto_tbl,
                                                     group_id, flags);

                if (err)
                        netdev_err(rocker_port->dev,
                                   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
                                   err, &entry->ip_addr, group_id);
        }

err_out:
        /* On add, @entry was handed to the table; otherwise free it. */
        if (!adding)
                rocker_port_kfree(trans, entry);

        return err;
}
3054
3055 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3056                                     struct switchdev_trans *trans,
3057                                     __be32 ip_addr)
3058 {
3059         struct net_device *dev = rocker_port->dev;
3060         struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3061         int err = 0;
3062
3063         if (!n) {
3064                 n = neigh_create(&arp_tbl, &ip_addr, dev);
3065                 if (IS_ERR(n))
3066                         return IS_ERR(n);
3067         }
3068
3069         /* If the neigh is already resolved, then go ahead and
3070          * install the entry, otherwise start the ARP process to
3071          * resolve the neigh.
3072          */
3073
3074         if (n->nud_state & NUD_VALID)
3075                 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3076                                              ip_addr, n->ha);
3077         else
3078                 neigh_event_send(n, NULL);
3079
3080         neigh_release(n);
3081         return err;
3082 }
3083
/* Reference (or release, per @flags) the neighbor used as a nexthop for
 * @ip_addr, returning its hardware L3 unicast group index via *@index.
 * If the neighbor doesn't exist yet, a placeholder entry (no MAC) is
 * inserted and ARP resolution is started; the route can reference the
 * group index immediately while resolution completes asynchronously.
 * Returns -ENOENT when asked to release a nonexistent neighbor.
 */
static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
                               struct switchdev_trans *trans, int flags,
                               __be32 ip_addr, u32 *index)
{
        struct rocker *rocker = rocker_port->rocker;
        struct rocker_neigh_tbl_entry *entry;
        struct rocker_neigh_tbl_entry *found;
        unsigned long lock_flags;
        bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
        bool updating;
        bool removing;
        bool resolved = true;
        int err = 0;

        entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
        if (!entry)
                return -ENOMEM;

        spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);

        found = rocker_neigh_tbl_find(rocker, ip_addr);
        if (found)
                *index = found->index;

        /* Classify the operation by presence of an existing entry. */
        updating = found && adding;
        removing = found && !adding;
        adding = !found && adding;

        if (adding) {
                /* Placeholder with no MAC yet; ownership of @entry
                 * passes to the table via _rocker_neigh_add.
                 */
                entry->ip_addr = ip_addr;
                entry->dev = rocker_port->dev;
                _rocker_neigh_add(rocker, trans, entry);
                *index = entry->index;
                resolved = false;
        } else if (removing) {
                _rocker_neigh_del(trans, found);
        } else if (updating) {
                /* NULL eth_dst: just bump the refcount. */
                _rocker_neigh_update(found, trans, NULL, false);
                resolved = !is_zero_ether_addr(found->eth_dst);
        } else {
                err = -ENOENT;
        }

        spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);

        /* On add, @entry was handed to the table; otherwise free it. */
        if (!adding)
                rocker_port_kfree(trans, entry);

        if (err)
                return err;

        /* Resolved means neigh ip_addr is resolved to neigh mac. */

        if (!resolved)
                err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);

        return err;
}
3142
3143 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3144                                         struct switchdev_trans *trans,
3145                                         int flags, __be16 vlan_id)
3146 {
3147         struct rocker_port *p;
3148         const struct rocker *rocker = rocker_port->rocker;
3149         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3150         u32 *group_ids;
3151         u8 group_count = 0;
3152         int err = 0;
3153         int i;
3154
3155         group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
3156                                         rocker->port_count, sizeof(u32));
3157         if (!group_ids)
3158                 return -ENOMEM;
3159
3160         /* Adjust the flood group for this VLAN.  The flood group
3161          * references an L2 interface group for each port in this
3162          * VLAN.
3163          */
3164
3165         for (i = 0; i < rocker->port_count; i++) {
3166                 p = rocker->ports[i];
3167                 if (!p)
3168                         continue;
3169                 if (!rocker_port_is_bridged(p))
3170                         continue;
3171                 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3172                         group_ids[group_count++] =
3173                                 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3174                 }
3175         }
3176
3177         /* If there are no bridged ports in this VLAN, we're done */
3178         if (group_count == 0)
3179                 goto no_ports_in_vlan;
3180
3181         err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3182                                     group_count, group_ids, group_id);
3183         if (err)
3184                 netdev_err(rocker_port->dev,
3185                            "Error (%d) port VLAN l2 flood group\n", err);
3186
3187 no_ports_in_vlan:
3188         rocker_port_kfree(trans, group_ids);
3189         return err;
3190 }
3191
/* Install (or remove) the L2 interface groups needed for this port's
 * membership in vlan_id: one group for the port itself (only while
 * its STP state allows forwarding) and, on first-join/last-leave,
 * the shared group toward the CPU port.
 */
static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 vlan_id, bool pop_vlan)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_port *p;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	u32 out_pport;
	int ref = 0;
	int err;
	int i;

	/* An L2 interface group for this port in this VLAN, but
	 * only when port STP state is LEARNING|FORWARDING.
	 */

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING) {
		out_pport = rocker_port->pport;
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	/* An L2 interface group for this VLAN to CPU port.
	 * Add when first port joins this VLAN and destroy when
	 * last port leaves this VLAN.
	 */

	/* Count ports currently in this VLAN.  The caller has already
	 * updated this port's vlan_bitmap, so ref reflects the
	 * post-change membership.
	 */
	for (i = 0; i < rocker->port_count; i++) {
		p = rocker->ports[i];
		if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
			ref++;
	}

	/* Touch the CPU-port group only on first join (adding with
	 * ref == 1) or last leave (removing with ref == 0).
	 */
	if ((!adding || ref != 1) && (adding || ref != 0))
		return 0;

	out_pport = 0;	/* the CPU port */
	err = rocker_group_l2_interface(rocker_port, trans, flags,
					vlan_id, out_pport, pop_vlan);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 group for CPU port\n", err);
		return err;
	}

	return 0;
}
3246
/* Table of control-traffic policies, indexed by ROCKER_CTRL_*.
 * Each entry describes a class of packets and how it is trapped:
 * via an ACL flow (acl), a bridging flood entry (bridge), or a
 * termination-MAC entry (term), optionally copied to the CPU.
 */
static struct rocker_ctrl {
	const u8 *eth_dst;	/* dst MAC to match (NULL when unused) */
	const u8 *eth_dst_mask;	/* mask applied to eth_dst */
	__be16 eth_type;	/* ethertype to match (0 when unused) */
	bool acl;		/* install as ACL policy entry */
	bool bridge;		/* install as bridging flood entry */
	bool term;		/* install as termination-MAC entry */
	bool copy_to_cpu;	/* also deliver matching pkts to CPU */
} rocker_ctrls[] = {
	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
		/* pass link local multicast pkts up to CPU for filtering */
		.eth_dst = ll_mac,
		.eth_dst_mask = ll_mask,
		.acl = true,
	},
	[ROCKER_CTRL_LOCAL_ARP] = {
		/* pass local ARP pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.eth_type = htons(ETH_P_ARP),
		.acl = true,
	},
	[ROCKER_CTRL_IPV4_MCAST] = {
		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
		.eth_dst = ipv4_mcast,
		.eth_dst_mask = ipv4_mask,
		.eth_type = htons(ETH_P_IP),
		.term  = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_IPV6_MCAST] = {
		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
		.eth_dst = ipv6_mcast,
		.eth_dst_mask = ipv6_mask,
		.eth_type = htons(ETH_P_IPV6),
		.term  = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_BRIDGING] = {
		/* flood any pkts on vlan */
		.bridge = true,
		.copy_to_cpu = true,
	},
	[ROCKER_CTRL_DFLT_OVS] = {
		/* pass all pkts up to CPU */
		.eth_dst = zero_mac,
		.eth_dst_mask = zero_mac,
		.acl = true,
	},
};
3297
3298 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3299                                      struct switchdev_trans *trans, int flags,
3300                                      const struct rocker_ctrl *ctrl, __be16 vlan_id)
3301 {
3302         u32 in_pport = rocker_port->pport;
3303         u32 in_pport_mask = 0xffffffff;
3304         u32 out_pport = 0;
3305         const u8 *eth_src = NULL;
3306         const u8 *eth_src_mask = NULL;
3307         __be16 vlan_id_mask = htons(0xffff);
3308         u8 ip_proto = 0;
3309         u8 ip_proto_mask = 0;
3310         u8 ip_tos = 0;
3311         u8 ip_tos_mask = 0;
3312         u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3313         int err;
3314
3315         err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3316                                   in_pport, in_pport_mask,
3317                                   eth_src, eth_src_mask,
3318                                   ctrl->eth_dst, ctrl->eth_dst_mask,
3319                                   ctrl->eth_type,
3320                                   vlan_id, vlan_id_mask,
3321                                   ip_proto, ip_proto_mask,
3322                                   ip_tos, ip_tos_mask,
3323                                   group_id);
3324
3325         if (err)
3326                 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3327
3328         return err;
3329 }
3330
3331 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3332                                         struct switchdev_trans *trans,
3333                                         int flags,
3334                                         const struct rocker_ctrl *ctrl,
3335                                         __be16 vlan_id)
3336 {
3337         enum rocker_of_dpa_table_id goto_tbl =
3338                 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3339         u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3340         u32 tunnel_id = 0;
3341         int err;
3342
3343         if (!rocker_port_is_bridged(rocker_port))
3344                 return 0;
3345
3346         err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3347                                      ctrl->eth_dst, ctrl->eth_dst_mask,
3348                                      vlan_id, tunnel_id,
3349                                      goto_tbl, group_id, ctrl->copy_to_cpu);
3350
3351         if (err)
3352                 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3353
3354         return err;
3355 }
3356
3357 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3358                                       struct switchdev_trans *trans, int flags,
3359                                       const struct rocker_ctrl *ctrl, __be16 vlan_id)
3360 {
3361         u32 in_pport_mask = 0xffffffff;
3362         __be16 vlan_id_mask = htons(0xffff);
3363         int err;
3364
3365         if (ntohs(vlan_id) == 0)
3366                 vlan_id = rocker_port->internal_vlan_id;
3367
3368         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3369                                        rocker_port->pport, in_pport_mask,
3370                                        ctrl->eth_type, ctrl->eth_dst,
3371                                        ctrl->eth_dst_mask, vlan_id,
3372                                        vlan_id_mask, ctrl->copy_to_cpu,
3373                                        flags);
3374
3375         if (err)
3376                 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3377
3378         return err;
3379 }
3380
3381 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3382                                  struct switchdev_trans *trans, int flags,
3383                                  const struct rocker_ctrl *ctrl, __be16 vlan_id)
3384 {
3385         if (ctrl->acl)
3386                 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3387                                                  ctrl, vlan_id);
3388         if (ctrl->bridge)
3389                 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3390                                                     ctrl, vlan_id);
3391
3392         if (ctrl->term)
3393                 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3394                                                   ctrl, vlan_id);
3395
3396         return -EOPNOTSUPP;
3397 }
3398
3399 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3400                                      struct switchdev_trans *trans, int flags,
3401                                      __be16 vlan_id)
3402 {
3403         int err = 0;
3404         int i;
3405
3406         for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3407                 if (rocker_port->ctrls[i]) {
3408                         err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3409                                                     &rocker_ctrls[i], vlan_id);
3410                         if (err)
3411                                 return err;
3412                 }
3413         }
3414
3415         return err;
3416 }
3417
3418 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3419                             struct switchdev_trans *trans, int flags,
3420                             const struct rocker_ctrl *ctrl)
3421 {
3422         u16 vid;
3423         int err = 0;
3424
3425         for (vid = 1; vid < VLAN_N_VID; vid++) {
3426                 if (!test_bit(vid, rocker_port->vlan_bitmap))
3427                         continue;
3428                 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3429                                             ctrl, htons(vid));
3430                 if (err)
3431                         break;
3432         }
3433
3434         return err;
3435 }
3436
/* Add or remove this port's membership in 802.1Q VLAN vid
 * (ROCKER_OP_FLAG_REMOVE in flags selects removal).  Programs the
 * ctrl policies, L2 interface/flood groups, and the VLAN table
 * entry mapping (ingress port, vid) to the internal VLAN id.
 */
static int rocker_port_vlan(struct rocker_port *rocker_port,
			    struct switchdev_trans *trans, int flags, u16 vid)
{
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
	u32 in_pport = rocker_port->pport;
	__be16 vlan_id = htons(vid);
	__be16 vlan_id_mask = htons(0xffff);
	__be16 internal_vlan_id;
	bool untagged;
	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
	int err;

	/* vid 0 (untagged) maps to the port's internal VLAN id */
	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);

	if (adding && test_bit(ntohs(internal_vlan_id),
			       rocker_port->vlan_bitmap))
			return 0; /* already added */
	else if (!adding && !test_bit(ntohs(internal_vlan_id),
				      rocker_port->vlan_bitmap))
			return 0; /* already removed */

	/* Record the new membership before programming the tables;
	 * helpers below (e.g. the CPU-group first-join/last-leave
	 * check) rely on it.
	 */
	change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	if (adding) {
		err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
						internal_vlan_id);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port ctrl vlan add\n", err);
			goto err_out;
		}
	}

	err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
					 internal_vlan_id, untagged);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 groups\n", err);
		goto err_out;
	}

	err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
					   internal_vlan_id);
	if (err) {
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN l2 flood group\n", err);
		goto err_out;
	}

	err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
				   in_pport, vlan_id, vlan_id_mask,
				   goto_tbl, untagged, internal_vlan_id);
	if (err)
		netdev_err(rocker_port->dev,
			   "Error (%d) port VLAN table\n", err);

err_out:
	/* The switchdev prepare phase must leave no lasting state:
	 * undo the bitmap change so the commit phase re-runs with the
	 * original membership.
	 */
	if (switchdev_trans_ph_prepare(trans))
		change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);

	return err;
}
3500
3501 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3502                               struct switchdev_trans *trans, int flags)
3503 {
3504         enum rocker_of_dpa_table_id goto_tbl;
3505         u32 in_pport;
3506         u32 in_pport_mask;
3507         int err;
3508
3509         /* Normal Ethernet Frames.  Matches pkts from any local physical
3510          * ports.  Goto VLAN tbl.
3511          */
3512
3513         in_pport = 0;
3514         in_pport_mask = 0xffff0000;
3515         goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3516
3517         err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3518                                       in_pport, in_pport_mask,
3519                                       goto_tbl);
3520         if (err)
3521                 netdev_err(rocker_port->dev,
3522                            "Error (%d) ingress port table entry\n", err);
3523
3524         return err;
3525 }
3526
/* Deferred work used to sync learned/forgotten FDB entries to the
 * bridge layer via switchdev notifiers; runs in process context so
 * rtnl_lock can be taken.
 */
struct rocker_fdb_learn_work {
	struct work_struct work;
	struct rocker_port *rocker_port;
	struct switchdev_trans *trans;	/* transaction owning this memory */
	int flags;			/* ROCKER_OP_FLAG_* of the event */
	u8 addr[ETH_ALEN];		/* MAC address learned/removed */
	u16 vid;			/* VLAN id as reported to the bridge */
};
3535
/* Work handler: report a learned (or aged-out) FDB entry to the
 * bridge layer via switchdev notifiers.  rtnl must be held for the
 * notifier call, which is why this runs deferred.  Frees the work
 * item before returning.
 */
static void rocker_port_fdb_learn_work(struct work_struct *work)
{
	const struct rocker_fdb_learn_work *lw =
		container_of(work, struct rocker_fdb_learn_work, work);
	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
	struct switchdev_notifier_fdb_info info;

	info.addr = lw->addr;
	info.vid = lw->vid;

	rtnl_lock();
	if (learned && removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
					 lw->rocker_port->dev, &info.info);
	else if (learned && !removing)
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
					 lw->rocker_port->dev, &info.info);
	rtnl_unlock();

	/* lw is the work allocation itself */
	rocker_port_kfree(lw->trans, work);
}
3558
/* Program (or remove) the bridging flow for addr/vlan_id on this
 * port and, when learning sync is enabled on a bridged port, queue
 * deferred work to notify the bridge of the learned/forgotten
 * address.  Returns 0 on success or a negative errno.
 */
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags,
				 const u8 *addr, __be16 vlan_id)
{
	struct rocker_fdb_learn_work *lw;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 out_pport = rocker_port->pport;
	u32 tunnel_id = 0;
	u32 group_id = ROCKER_GROUP_NONE;
	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
	bool copy_to_cpu = false;
	int err;

	if (rocker_port_is_bridged(rocker_port))
		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);

	/* REFRESH only updates aging state; the flow is already
	 * programmed, so skip reinstalling it.
	 */
	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
		err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
					     NULL, vlan_id, tunnel_id, goto_tbl,
					     group_id, copy_to_cpu);
		if (err)
			return err;
	}

	if (!syncing)
		return 0;

	if (!rocker_port_is_bridged(rocker_port))
		return 0;

	lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
	if (!lw)
		return -ENOMEM;

	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);

	lw->rocker_port = rocker_port;
	lw->trans = trans;
	lw->flags = flags;
	ether_addr_copy(lw->addr, addr);
	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);

	/* Nothing may be scheduled during the prepare phase; the
	 * commit pass will schedule the real work.
	 */
	if (switchdev_trans_ph_prepare(trans))
		rocker_port_kfree(trans, lw);
	else
		schedule_work(&lw->work);

	return 0;
}
3609
3610 static struct rocker_fdb_tbl_entry *
3611 rocker_fdb_tbl_find(const struct rocker *rocker,
3612                     const struct rocker_fdb_tbl_entry *match)
3613 {
3614         struct rocker_fdb_tbl_entry *found;
3615
3616         hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3617                 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3618                         return found;
3619
3620         return NULL;
3621 }
3622
/* Add, refresh, or remove the software FDB table entry for
 * addr/vlan_id on this port, then sync the matching hardware
 * bridging flow via rocker_port_fdb_learn().
 */
static int rocker_port_fdb(struct rocker_port *rocker_port,
			   struct switchdev_trans *trans,
			   const unsigned char *addr,
			   __be16 vlan_id, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *fdb;
	struct rocker_fdb_tbl_entry *found;
	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
	unsigned long lock_flags;

	/* Build the candidate entry before taking the spinlock */
	fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
	if (!fdb)
		return -ENOMEM;

	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
	fdb->touched = jiffies;
	fdb->key.rocker_port = rocker_port;
	ether_addr_copy(fdb->key.addr, addr);
	fdb->key.vlan_id = vlan_id;
	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	found = rocker_fdb_tbl_find(rocker, fdb);

	if (found) {
		found->touched = jiffies;	/* restart aging */
		if (removing) {
			rocker_port_kfree(trans, fdb);
			/* hash mutation only in the commit phase */
			if (!switchdev_trans_ph_prepare(trans))
				hash_del(&found->entry);
		}
	} else if (!removing) {
		if (!switchdev_trans_ph_prepare(trans))
			hash_add(rocker->fdb_tbl, &fdb->entry,
				 fdb->key_crc32);
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	/* Check if adding and already exists, or removing and can't find */
	if (!found != !removing) {
		rocker_port_kfree(trans, fdb);
		if (!found && removing)
			return 0;	/* nothing to remove */
		/* Refreshing existing to update aging timers */
		flags |= ROCKER_OP_FLAG_REFRESH;
	}

	return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
}
3675
/* Remove all learned FDB entries for this port, both the software
 * table entries and the matching hardware flows.  No-op while the
 * port is still in a forwarding-capable STP state, where the
 * entries remain valid.
 */
static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
				 struct switchdev_trans *trans, int flags)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	unsigned long lock_flags;
	struct hlist_node *tmp;
	int bkt;
	int err = 0;

	if (rocker_port->stp_state == BR_STATE_LEARNING ||
	    rocker_port->stp_state == BR_STATE_FORWARDING)
		return 0;

	flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		if (!found->learned)
			continue;	/* keep non-learned entries */
		err = rocker_port_fdb_learn(rocker_port, trans, flags,
					    found->key.addr,
					    found->key.vlan_id);
		if (err)
			goto err_out;
		/* hash mutation only in the commit phase */
		if (!switchdev_trans_ph_prepare(trans))
			hash_del(&found->entry);
	}

err_out:
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
3713
/* Timer callback: age out learned FDB entries whose per-port ageing
 * time has expired, then re-arm the timer for the next earliest
 * expiry.  Runs outside any switchdev transaction (trans == NULL).
 */
static void rocker_fdb_cleanup(unsigned long data)
{
	struct rocker *rocker = (struct rocker *)data;
	struct rocker_port *rocker_port;
	struct rocker_fdb_tbl_entry *entry;
	struct hlist_node *tmp;
	unsigned long next_timer = jiffies + rocker->ageing_time;
	unsigned long expires;
	unsigned long lock_flags;
	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
		    ROCKER_OP_FLAG_LEARNED;
	int bkt;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);

	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
		if (!entry->learned)
			continue;	/* non-learned entries never age */
		rocker_port = entry->key.rocker_port;
		expires = entry->touched + rocker_port->ageing_time;
		if (time_before_eq(expires, jiffies)) {
			/* expired: remove the HW flow and notify bridge */
			rocker_port_fdb_learn(rocker_port, NULL,
					      flags, entry->key.addr,
					      entry->key.vlan_id);
			hash_del(&entry->entry);
		} else if (time_before(expires, next_timer)) {
			/* track the earliest upcoming expiry */
			next_timer = expires;
		}
	}

	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
}
3748
3749 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3750                                   struct switchdev_trans *trans, int flags,
3751                                   __be16 vlan_id)
3752 {
3753         u32 in_pport_mask = 0xffffffff;
3754         __be16 eth_type;
3755         const u8 *dst_mac_mask = ff_mac;
3756         __be16 vlan_id_mask = htons(0xffff);
3757         bool copy_to_cpu = false;
3758         int err;
3759
3760         if (ntohs(vlan_id) == 0)
3761                 vlan_id = rocker_port->internal_vlan_id;
3762
3763         eth_type = htons(ETH_P_IP);
3764         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3765                                        rocker_port->pport, in_pport_mask,
3766                                        eth_type, rocker_port->dev->dev_addr,
3767                                        dst_mac_mask, vlan_id, vlan_id_mask,
3768                                        copy_to_cpu, flags);
3769         if (err)
3770                 return err;
3771
3772         eth_type = htons(ETH_P_IPV6);
3773         err = rocker_flow_tbl_term_mac(rocker_port, trans,
3774                                        rocker_port->pport, in_pport_mask,
3775                                        eth_type, rocker_port->dev->dev_addr,
3776                                        dst_mac_mask, vlan_id, vlan_id_mask,
3777                                        copy_to_cpu, flags);
3778
3779         return err;
3780 }
3781
/* Sync the port's forwarding state to hardware by installing or
 * removing the L2 interface group of every VLAN the port belongs
 * to, based on its current STP state.
 */
static int rocker_port_fwding(struct rocker_port *rocker_port,
			      struct switchdev_trans *trans, int flags)
{
	bool pop_vlan;
	u32 out_pport;
	__be16 vlan_id;
	u16 vid;
	int err;

	/* Port will be forwarding-enabled if its STP state is LEARNING
	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
	 * port STP state.  Use L2 interface group on port VLANs as a way
	 * to toggle port forwarding: if forwarding is disabled, L2
	 * interface group will not exist.
	 */

	if (rocker_port->stp_state != BR_STATE_LEARNING &&
	    rocker_port->stp_state != BR_STATE_FORWARDING)
		flags |= ROCKER_OP_FLAG_REMOVE;

	out_pport = rocker_port->pport;
	for (vid = 1; vid < VLAN_N_VID; vid++) {
		if (!test_bit(vid, rocker_port->vlan_bitmap))
			continue;
		vlan_id = htons(vid);
		/* internal VLAN ids are stripped on egress (untagged) */
		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
		err = rocker_group_l2_interface(rocker_port, trans, flags,
						vlan_id, out_pport, pop_vlan);
		if (err) {
			netdev_err(rocker_port->dev,
				   "Error (%d) port VLAN l2 group for pport %d\n",
				   err, out_pport);
			return err;
		}
	}

	return 0;
}
3820
/* Move the port to STP state 'state': install/remove the ctrl
 * policies appropriate for that state, flush learned FDB entries,
 * and toggle forwarding via the port's L2 interface groups.
 */
static int rocker_port_stp_update(struct rocker_port *rocker_port,
				  struct switchdev_trans *trans, int flags,
				  u8 state)
{
	bool want[ROCKER_CTRL_MAX] = { 0, };
	bool prev_ctrls[ROCKER_CTRL_MAX];
	u8 uninitialized_var(prev_state);
	int err;
	int i;

	/* Snapshot state so the prepare phase can be fully undone */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
		prev_state = rocker_port->stp_state;
	}

	if (rocker_port->stp_state == state)
		return 0;

	rocker_port->stp_state = state;

	/* Decide which ctrl policies this STP state wants */
	switch (state) {
	case BR_STATE_DISABLED:
		/* port is completely disabled */
		break;
	case BR_STATE_LISTENING:
	case BR_STATE_BLOCKING:
		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		break;
	case BR_STATE_LEARNING:
	case BR_STATE_FORWARDING:
		if (!rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
		want[ROCKER_CTRL_IPV4_MCAST] = true;
		want[ROCKER_CTRL_IPV6_MCAST] = true;
		if (rocker_port_is_bridged(rocker_port))
			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
		else if (rocker_port_is_ovsed(rocker_port))
			want[ROCKER_CTRL_DFLT_OVS] = true;
		else
			want[ROCKER_CTRL_LOCAL_ARP] = true;
		break;
	}

	/* Apply only the delta against the currently-installed ctrls */
	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
		if (want[i] != rocker_port->ctrls[i]) {
			int ctrl_flags = flags |
					 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
			err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
					       &rocker_ctrls[i]);
			if (err)
				goto err_out;
			rocker_port->ctrls[i] = want[i];
		}
	}

	err = rocker_port_fdb_flush(rocker_port, trans, flags);
	if (err)
		goto err_out;

	err = rocker_port_fwding(rocker_port, trans, flags);

err_out:
	/* The prepare phase must leave no lasting state; restore the
	 * snapshot so the commit phase re-runs from the original
	 * ctrls/state.
	 */
	if (switchdev_trans_ph_prepare(trans)) {
		memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
		rocker_port->stp_state = prev_state;
	}

	return err;
}
3890
3891 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3892                                   struct switchdev_trans *trans, int flags)
3893 {
3894         if (rocker_port_is_bridged(rocker_port))
3895                 /* bridge STP will enable port */
3896                 return 0;
3897
3898         /* port is not bridged, so simulate going to FORWARDING state */
3899         return rocker_port_stp_update(rocker_port, trans, flags,
3900                                       BR_STATE_FORWARDING);
3901 }
3902
3903 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3904                                    struct switchdev_trans *trans, int flags)
3905 {
3906         if (rocker_port_is_bridged(rocker_port))
3907                 /* bridge STP will disable port */
3908                 return 0;
3909
3910         /* port is not bridged, so simulate going to DISABLED state */
3911         return rocker_port_stp_update(rocker_port, trans, flags,
3912                                       BR_STATE_DISABLED);
3913 }
3914
3915 static struct rocker_internal_vlan_tbl_entry *
3916 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3917 {
3918         struct rocker_internal_vlan_tbl_entry *found;
3919
3920         hash_for_each_possible(rocker->internal_vlan_tbl, found,
3921                                entry, ifindex) {
3922                 if (found->ifindex == ifindex)
3923                         return found;
3924         }
3925
3926         return NULL;
3927 }
3928
/* Look up (or allocate) the internal VLAN ID used to represent the
 * untagged traffic of the netdev identified by @ifindex, taking a
 * reference on the table entry.
 *
 * A new entry is speculatively allocated with GFP_KERNEL *before*
 * taking internal_vlan_tbl_lock so no allocation happens under the
 * spinlock; if an entry for @ifindex already exists, the speculative
 * one is freed and the existing entry is reused.
 *
 * Returns the internal VLAN ID (network byte order), or 0 on
 * allocation failure.  NOTE(review): when the internal-VLAN bitmap is
 * exhausted, the entry is still hashed and ref-counted with vlan_id
 * left at 0 from kzalloc, so callers appear to receive 0 in that case
 * too — confirm that callers treat 0 as invalid.
 */
static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
					       int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *entry;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	int i;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	entry->ifindex = ifindex;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (found) {
		/* lost the race (or already present): drop our copy */
		kfree(entry);
		goto found;
	}

	found = entry;
	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);

	/* claim the first free internal VLAN ID from the bitmap */
	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
			continue;
		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
		goto found;
	}

	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");

found:
	found->ref_count++;
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);

	return found->vlan_id;
}
3970
/* Release one reference on the internal-VLAN entry for @ifindex.
 * When the last reference is dropped, the VLAN ID bit is returned to
 * the bitmap and the table entry is removed and freed.  A missing
 * entry is logged and otherwise ignored.
 */
static void
rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
				 int ifindex)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_internal_vlan_tbl_entry *found;
	unsigned long lock_flags;
	unsigned long bit;

	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);

	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
	if (!found) {
		netdev_err(rocker_port->dev,
			   "ifindex (%d) not found in internal VLAN tbl\n",
			   ifindex);
		goto not_found;
	}

	if (--found->ref_count <= 0) {
		/* recover the bitmap index from the stored VLAN ID */
		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
		clear_bit(bit, rocker->internal_vlan_bitmap);
		hash_del(&found->entry);
		kfree(found);
	}

not_found:
	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
}
4000
/* Program an IPv4 unicast route into the OF-DPA unicast-routing table.
 *
 * Only the first nexthop of @fi is used (no ECMP yet, see XXX).  If
 * the route has a gateway reachable through this port, an L3 unicast
 * group is resolved/created for the nexthop; otherwise the route's
 * traffic is steered to the CPU via the port's internal-VLAN L2
 * interface group.
 *
 * @dst/@dst_len describe the prefix, @flags carries rocker op flags
 * (e.g. ROCKER_OP_FLAG_REMOVE for deletion).  @tb_id is currently
 * unused.  Returns 0 on success or a negative errno.
 */
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, __be32 dst,
				int dst_len, const struct fib_info *fi,
				u32 tb_id, int flags)
{
	const struct fib_nh *nh;
	__be16 eth_type = htons(ETH_P_IP);
	__be32 dst_mask = inet_make_mask(dst_len);
	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
	u32 priority = fi->fib_priority;
	enum rocker_of_dpa_table_id goto_tbl =
		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
	u32 group_id;
	bool nh_on_port;
	bool has_gw;
	u32 index;
	int err;

	/* XXX support ECMP */

	nh = fi->fib_nh;
	nh_on_port = (fi->fib_dev == rocker_port->dev);
	has_gw = !!nh->nh_gw;

	if (has_gw && nh_on_port) {
		err = rocker_port_ipv4_nh(rocker_port, trans, flags,
					  nh->nh_gw, &index);
		if (err)
			return err;

		group_id = ROCKER_GROUP_L3_UNICAST(index);
	} else {
		/* Send to CPU for processing */
		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
	}

	err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
					     dst_mask, priority, goto_tbl,
					     group_id, flags);
	if (err)
		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
			   err, &dst);

	return err;
}
4046
4047 /*****************
4048  * Net device ops
4049  *****************/
4050
/* ndo_open: bring the port up.
 *
 * Order matters: the DMA rings must exist before the TX/RX MSI-X IRQs
 * are requested, and forwarding is enabled before NAPI and the queue
 * are started.  The error path unwinds in reverse order.
 */
static int rocker_port_open(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int err;

	err = rocker_port_dma_rings_init(rocker_port);
	if (err)
		return err;

	err = request_irq(rocker_msix_tx_vector(rocker_port),
			  rocker_tx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
		goto err_request_tx_irq;
	}

	err = request_irq(rocker_msix_rx_vector(rocker_port),
			  rocker_rx_irq_handler, 0,
			  rocker_driver_name, rocker_port);
	if (err) {
		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
		goto err_request_rx_irq;
	}

	err = rocker_port_fwd_enable(rocker_port, NULL, 0);
	if (err)
		goto err_fwd_enable;

	napi_enable(&rocker_port->napi_tx);
	napi_enable(&rocker_port->napi_rx);
	/* honor proto_down: leave the port administratively disabled */
	if (!dev->proto_down)
		rocker_port_set_enable(rocker_port, true);
	netif_start_queue(dev);
	return 0;

err_fwd_enable:
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
err_request_rx_irq:
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
err_request_tx_irq:
	rocker_port_dma_rings_fini(rocker_port);
	return err;
}
4095
/* ndo_stop: take the port down, tearing down in the reverse order of
 * rocker_port_open(): queue, hardware enable, NAPI, forwarding, IRQs,
 * then the DMA rings.  Forwarding is disabled with NOWAIT since the
 * device may be going away.
 */
static int rocker_port_stop(struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	netif_stop_queue(dev);
	rocker_port_set_enable(rocker_port, false);
	napi_disable(&rocker_port->napi_rx);
	napi_disable(&rocker_port->napi_tx);
	rocker_port_fwd_disable(rocker_port, NULL,
				ROCKER_OP_FLAG_NOWAIT);
	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
	rocker_port_dma_rings_fini(rocker_port);

	return 0;
}
4112
/* Walk the TX descriptor's TX_FRAG TLVs and DMA-unmap every fragment
 * that was mapped by rocker_tx_desc_frag_map_put().  Used on TX
 * completion and on the xmit error path.  Fragments missing an ADDR
 * or LEN attribute are skipped.
 */
static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
	struct rocker_tlv *attr;
	int rem;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_TX_FRAGS])
		return;
	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
		const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
		dma_addr_t dma_handle;
		size_t len;

		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
			continue;
		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
					attr);
		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
			continue;
		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
	}
}
4142
/* DMA-map @buf and append it to @desc_info as a TX_FRAG nest carrying
 * the mapped address (u64) and length (u16).
 *
 * Returns 0 on success; -EIO if the DMA mapping fails; -EMSGSIZE when
 * the descriptor runs out of TLV space, in which case the partially
 * written nest is cancelled and the mapping is undone.
 */
static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
				       struct rocker_desc_info *desc_info,
				       char *buf, size_t buf_len)
{
	const struct rocker *rocker = rocker_port->rocker;
	struct pci_dev *pdev = rocker->pdev;
	dma_addr_t dma_handle;
	struct rocker_tlv *frag;

	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
		if (net_ratelimit())
			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
	if (!frag)
		goto unmap_frag;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
			       dma_handle))
		goto nest_cancel;
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
			       buf_len))
		goto nest_cancel;
	rocker_tlv_nest_end(desc_info, frag);
	return 0;

nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
	return -EMSGSIZE;
}
4176
/* ndo_start_xmit: post @skb on the TX descriptor ring.
 *
 * The skb head and each page fragment are DMA-mapped and described as
 * TX_FRAG TLVs inside a single descriptor; the skb pointer is stored
 * as the descriptor cookie for the completion path.  On any error the
 * skb is dropped (freed here) and NETDEV_TX_OK is returned so the
 * stack does not retry.
 */
static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	struct rocker_tlv *frags;
	int i;
	int err;

	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (unlikely(!desc_info)) {
		if (net_ratelimit())
			netdev_err(dev, "tx ring full when queue awake\n");
		return NETDEV_TX_BUSY;
	}

	rocker_desc_cookie_ptr_set(desc_info, skb);

	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
	if (!frags)
		goto out;
	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
					  skb->data, skb_headlen(skb));
	if (err)
		goto nest_cancel;
	/* Too many fragments for one descriptor: collapse the skb.
	 * NOTE(review): skb_linearize() may reallocate the head, yet
	 * skb->data was already DMA-mapped just above — looks like the
	 * mapped head fragment could then reference stale memory;
	 * confirm against the DMA/skb APIs.
	 */
	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
		err = skb_linearize(skb);
		if (err)
			goto unmap_frags;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
						  skb_frag_address(frag),
						  skb_frag_size(frag));
		if (err)
			goto unmap_frags;
	}
	rocker_tlv_nest_end(desc_info, frags);

	rocker_desc_gen_clear(desc_info);
	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);

	/* stop the queue if no descriptor is left for the next packet */
	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
	if (!desc_info)
		netif_stop_queue(dev);

	return NETDEV_TX_OK;

unmap_frags:
	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
nest_cancel:
	rocker_tlv_nest_cancel(desc_info, frags);
out:
	dev_kfree_skb(skb);
	dev->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
4238
4239 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4240 {
4241         struct sockaddr *addr = p;
4242         struct rocker_port *rocker_port = netdev_priv(dev);
4243         int err;
4244
4245         if (!is_valid_ether_addr(addr->sa_data))
4246                 return -EADDRNOTAVAIL;
4247
4248         err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4249         if (err)
4250                 return err;
4251         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4252         return 0;
4253 }
4254
/* ndo_change_mtu: change the port MTU in hardware.
 *
 * If the port is running it is stopped around the change and
 * reopened afterwards.  NOTE(review): if
 * rocker_cmd_set_port_settings_mtu() fails on a running port, this
 * returns with the port still stopped — confirm whether it should be
 * restarted on failure.
 */
static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int running = netif_running(dev);
	int err;

#define ROCKER_PORT_MIN_MTU	68
#define ROCKER_PORT_MAX_MTU	9000

	if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
		return -EINVAL;

	if (running)
		rocker_port_stop(dev);

	netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
	if (err)
		return err;

	if (running)
		err = rocker_port_open(dev);

	return err;
}
4282
4283 static int rocker_port_get_phys_port_name(struct net_device *dev,
4284                                           char *buf, size_t len)
4285 {
4286         struct rocker_port *rocker_port = netdev_priv(dev);
4287         struct port_name name = { .buf = buf, .len = len };
4288         int err;
4289
4290         err = rocker_cmd_exec(rocker_port, NULL, 0,
4291                               rocker_cmd_get_port_settings_prep, NULL,
4292                               rocker_cmd_get_port_settings_phys_name_proc,
4293                               &name);
4294
4295         return err ? -EOPNOTSUPP : 0;
4296 }
4297
/* ndo_change_proto_down: record the new proto_down state and, when
 * the interface is up, mirror it into the hardware port-enable bit.
 */
static int rocker_port_change_proto_down(struct net_device *dev,
					 bool proto_down)
{
	struct rocker_port *rocker_port = netdev_priv(dev);

	if (rocker_port->dev->flags & IFF_UP)
		rocker_port_set_enable(rocker_port, !proto_down);
	rocker_port->dev->proto_down = proto_down;
	return 0;
}
4308
4309 static void rocker_port_neigh_destroy(struct neighbour *n)
4310 {
4311         struct rocker_port *rocker_port = netdev_priv(n->dev);
4312         int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4313         __be32 ip_addr = *(__be32 *)n->primary_key;
4314
4315         rocker_port_ipv4_neigh(rocker_port, NULL,
4316                                flags, ip_addr, n->ha);
4317 }
4318
/* Netdev callbacks for a rocker port.  Bridge link and FDB operations
 * are delegated to the switchdev core helpers.
 */
static const struct net_device_ops rocker_port_netdev_ops = {
	.ndo_open			= rocker_port_open,
	.ndo_stop			= rocker_port_stop,
	.ndo_start_xmit			= rocker_port_xmit,
	.ndo_set_mac_address		= rocker_port_set_mac_address,
	.ndo_change_mtu			= rocker_port_change_mtu,
	.ndo_bridge_getlink		= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink		= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink		= switchdev_port_bridge_dellink,
	.ndo_fdb_add			= switchdev_port_fdb_add,
	.ndo_fdb_del			= switchdev_port_fdb_del,
	.ndo_fdb_dump			= switchdev_port_fdb_dump,
	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
	.ndo_change_proto_down		= rocker_port_change_proto_down,
	.ndo_neigh_destroy		= rocker_port_neigh_destroy,
};
4335
4336 /********************
4337  * swdev interface
4338  ********************/
4339
4340 static int rocker_port_attr_get(struct net_device *dev,
4341                                 struct switchdev_attr *attr)
4342 {
4343         const struct rocker_port *rocker_port = netdev_priv(dev);
4344         const struct rocker *rocker = rocker_port->rocker;
4345
4346         switch (attr->id) {
4347         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4348                 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4349                 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4350                 break;
4351         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4352                 attr->u.brport_flags = rocker_port->brport_flags;
4353                 break;
4354         default:
4355                 return -EOPNOTSUPP;
4356         }
4357
4358         return 0;
4359 }
4360
/* Apply bridge-port flags; only a change of BR_LEARNING is pushed to
 * hardware.  The new flags are stored before programming so that
 * rocker_port_set_learning() sees the updated state; during the
 * switchdev prepare phase the original flags are restored afterwards,
 * leaving the commit phase to make the change permanent.
 */
static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
					struct switchdev_trans *trans,
					unsigned long brport_flags)
{
	unsigned long orig_flags;
	int err = 0;

	orig_flags = rocker_port->brport_flags;
	rocker_port->brport_flags = brport_flags;
	if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
		err = rocker_port_set_learning(rocker_port, trans);

	if (switchdev_trans_ph_prepare(trans))
		rocker_port->brport_flags = orig_flags;

	return err;
}
4378
4379 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4380                                           struct switchdev_trans *trans,
4381                                           u32 ageing_time)
4382 {
4383         struct rocker *rocker = rocker_port->rocker;
4384
4385         if (!switchdev_trans_ph_prepare(trans)) {
4386                 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4387                 if (rocker_port->ageing_time < rocker->ageing_time)
4388                         rocker->ageing_time = rocker_port->ageing_time;
4389                 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4390         }
4391
4392         return 0;
4393 }
4394
4395 static int rocker_port_attr_set(struct net_device *dev,
4396                                 const struct switchdev_attr *attr,
4397                                 struct switchdev_trans *trans)
4398 {
4399         struct rocker_port *rocker_port = netdev_priv(dev);
4400         int err = 0;
4401
4402         switch (attr->id) {
4403         case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4404                 err = rocker_port_stp_update(rocker_port, trans, 0,
4405                                              attr->u.stp_state);
4406                 break;
4407         case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4408                 err = rocker_port_brport_flags_set(rocker_port, trans,
4409                                                    attr->u.brport_flags);
4410                 break;
4411         case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4412                 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4413                                                      attr->u.ageing_time);
4414                 break;
4415         default:
4416                 err = -EOPNOTSUPP;
4417                 break;
4418         }
4419
4420         return err;
4421 }
4422
4423 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4424                                 struct switchdev_trans *trans,
4425                                 u16 vid, u16 flags)
4426 {
4427         int err;
4428
4429         /* XXX deal with flags for PVID and untagged */
4430
4431         err = rocker_port_vlan(rocker_port, trans, 0, vid);
4432         if (err)
4433                 return err;
4434
4435         err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4436         if (err)
4437                 rocker_port_vlan(rocker_port, trans,
4438                                  ROCKER_OP_FLAG_REMOVE, vid);
4439
4440         return err;
4441 }
4442
4443 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4444                                  struct switchdev_trans *trans,
4445                                  const struct switchdev_obj_port_vlan *vlan)
4446 {
4447         u16 vid;
4448         int err;
4449
4450         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4451                 err = rocker_port_vlan_add(rocker_port, trans,
4452                                            vid, vlan->flags);
4453                 if (err)
4454                         return err;
4455         }
4456
4457         return 0;
4458 }
4459
4460 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4461                                struct switchdev_trans *trans,
4462                                const struct switchdev_obj_port_fdb *fdb)
4463 {
4464         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4465         int flags = 0;
4466
4467         if (!rocker_port_is_bridged(rocker_port))
4468                 return -EINVAL;
4469
4470         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4471 }
4472
4473 static int rocker_port_obj_add(struct net_device *dev,
4474                                const struct switchdev_obj *obj,
4475                                struct switchdev_trans *trans)
4476 {
4477         struct rocker_port *rocker_port = netdev_priv(dev);
4478         const struct switchdev_obj_ipv4_fib *fib4;
4479         int err = 0;
4480
4481         switch (obj->id) {
4482         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4483                 err = rocker_port_vlans_add(rocker_port, trans,
4484                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4485                 break;
4486         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4487                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4488                 err = rocker_port_fib_ipv4(rocker_port, trans,
4489                                            htonl(fib4->dst), fib4->dst_len,
4490                                            fib4->fi, fib4->tb_id, 0);
4491                 break;
4492         case SWITCHDEV_OBJ_ID_PORT_FDB:
4493                 err = rocker_port_fdb_add(rocker_port, trans,
4494                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4495                 break;
4496         default:
4497                 err = -EOPNOTSUPP;
4498                 break;
4499         }
4500
4501         return err;
4502 }
4503
4504 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4505                                 u16 vid, u16 flags)
4506 {
4507         int err;
4508
4509         err = rocker_port_router_mac(rocker_port, NULL,
4510                                      ROCKER_OP_FLAG_REMOVE, htons(vid));
4511         if (err)
4512                 return err;
4513
4514         return rocker_port_vlan(rocker_port, NULL,
4515                                 ROCKER_OP_FLAG_REMOVE, vid);
4516 }
4517
4518 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4519                                  const struct switchdev_obj_port_vlan *vlan)
4520 {
4521         u16 vid;
4522         int err;
4523
4524         for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4525                 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4526                 if (err)
4527                         return err;
4528         }
4529
4530         return 0;
4531 }
4532
4533 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4534                                struct switchdev_trans *trans,
4535                                const struct switchdev_obj_port_fdb *fdb)
4536 {
4537         __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4538         int flags = ROCKER_OP_FLAG_REMOVE;
4539
4540         if (!rocker_port_is_bridged(rocker_port))
4541                 return -EINVAL;
4542
4543         return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4544 }
4545
4546 static int rocker_port_obj_del(struct net_device *dev,
4547                                const struct switchdev_obj *obj)
4548 {
4549         struct rocker_port *rocker_port = netdev_priv(dev);
4550         const struct switchdev_obj_ipv4_fib *fib4;
4551         int err = 0;
4552
4553         switch (obj->id) {
4554         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4555                 err = rocker_port_vlans_del(rocker_port,
4556                                             SWITCHDEV_OBJ_PORT_VLAN(obj));
4557                 break;
4558         case SWITCHDEV_OBJ_ID_IPV4_FIB:
4559                 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4560                 err = rocker_port_fib_ipv4(rocker_port, NULL,
4561                                            htonl(fib4->dst), fib4->dst_len,
4562                                            fib4->fi, fib4->tb_id,
4563                                            ROCKER_OP_FLAG_REMOVE);
4564                 break;
4565         case SWITCHDEV_OBJ_ID_PORT_FDB:
4566                 err = rocker_port_fdb_del(rocker_port, NULL,
4567                                           SWITCHDEV_OBJ_PORT_FDB(obj));
4568                 break;
4569         default:
4570                 err = -EOPNOTSUPP;
4571                 break;
4572         }
4573
4574         return err;
4575 }
4576
/* Dump this port's FDB entries to the switchdev callback.  Entries
 * belonging to other ports are skipped.  fdb_tbl_lock is held across
 * the walk, so @cb must not sleep.  Stops early and returns the
 * callback's error, if any.
 */
static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
				struct switchdev_obj_port_fdb *fdb,
				switchdev_obj_dump_cb_t *cb)
{
	struct rocker *rocker = rocker_port->rocker;
	struct rocker_fdb_tbl_entry *found;
	struct hlist_node *tmp;
	unsigned long lock_flags;
	int bkt;
	int err = 0;

	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
		if (found->key.rocker_port != rocker_port)
			continue;
		ether_addr_copy(fdb->addr, found->key.addr);
		fdb->ndm_state = NUD_REACHABLE;
		fdb->vid = rocker_port_vlan_to_vid(rocker_port,
						   found->key.vlan_id);
		err = cb(&fdb->obj);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);

	return err;
}
4604
4605 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4606                                  struct switchdev_obj_port_vlan *vlan,
4607                                  switchdev_obj_dump_cb_t *cb)
4608 {
4609         u16 vid;
4610         int err = 0;
4611
4612         for (vid = 1; vid < VLAN_N_VID; vid++) {
4613                 if (!test_bit(vid, rocker_port->vlan_bitmap))
4614                         continue;
4615                 vlan->flags = 0;
4616                 if (rocker_vlan_id_is_internal(htons(vid)))
4617                         vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4618                 vlan->vid_begin = vlan->vid_end = vid;
4619                 err = cb(&vlan->obj);
4620                 if (err)
4621                         break;
4622         }
4623
4624         return err;
4625 }
4626
4627 static int rocker_port_obj_dump(struct net_device *dev,
4628                                 struct switchdev_obj *obj,
4629                                 switchdev_obj_dump_cb_t *cb)
4630 {
4631         const struct rocker_port *rocker_port = netdev_priv(dev);
4632         int err = 0;
4633
4634         switch (obj->id) {
4635         case SWITCHDEV_OBJ_ID_PORT_FDB:
4636                 err = rocker_port_fdb_dump(rocker_port,
4637                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4638                 break;
4639         case SWITCHDEV_OBJ_ID_PORT_VLAN:
4640                 err = rocker_port_vlan_dump(rocker_port,
4641                                             SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
4642                 break;
4643         default:
4644                 err = -EOPNOTSUPP;
4645                 break;
4646         }
4647
4648         return err;
4649 }
4650
/* switchdev callbacks for a rocker port */
static const struct switchdev_ops rocker_port_switchdev_ops = {
	.switchdev_port_attr_get	= rocker_port_attr_get,
	.switchdev_port_attr_set	= rocker_port_attr_set,
	.switchdev_port_obj_add		= rocker_port_obj_add,
	.switchdev_port_obj_del		= rocker_port_obj_del,
	.switchdev_port_obj_dump	= rocker_port_obj_dump,
};
4658
4659 /********************
4660  * ethtool interface
4661  ********************/
4662
/* ethtool get_settings: fetch the port's link settings from the
 * device.
 */
static int rocker_port_get_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	return rocker_cmd_get_port_settings_ethtool(netdev_priv(dev), ecmd);
}
4670
/* ethtool set_settings: push new link settings to the device. */
static int rocker_port_set_settings(struct net_device *dev,
				    struct ethtool_cmd *ecmd)
{
	return rocker_cmd_set_port_settings_ethtool(netdev_priv(dev), ecmd);
}
4678
4679 static void rocker_port_get_drvinfo(struct net_device *dev,
4680                                     struct ethtool_drvinfo *drvinfo)
4681 {
4682         strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4683         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4684 }
4685
/* ethtool statistics table: each entry maps a display string to the
 * ROCKER_TLV_CMD_PORT_STATS_* attribute that carries its value in a
 * GET_PORT_STATS response.
 */
static struct rocker_port_stats {
	char str[ETH_GSTRING_LEN];
	int type;
} rocker_port_stats[] = {
	{ "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
	{ "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
	{ "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
	{ "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },

	{ "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
	{ "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
	{ "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
	{ "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
};

#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
4702
4703 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4704                                     u8 *data)
4705 {
4706         u8 *p = data;
4707         int i;
4708
4709         switch (stringset) {
4710         case ETH_SS_STATS:
4711                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4712                         memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4713                         p += ETH_GSTRING_LEN;
4714                 }
4715                 break;
4716         }
4717 }
4718
/* Build a GET_PORT_STATS command descriptor: a top-level command type
 * TLV plus a nested CMD_INFO carrying the physical port number.
 * Returns 0 or -EMSGSIZE if the descriptor ran out of room.
 */
static int
rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info,
			       void *priv)
{
	struct rocker_tlv *cmd_stats;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_stats)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       rocker_port->pport))
		return -EMSGSIZE;

	rocker_tlv_nest_end(desc_info, cmd_stats);

	return 0;
}
4742
/* Parse the GET_PORT_STATS reply descriptor and fill the ethtool stats
 * array (priv) in rocker_port_stats[] table order. Verifies the reply
 * is for this port; missing counters leave their slot untouched.
 * Returns 0 or -EIO on a malformed/mismatched reply.
 */
static int
rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
				       const struct rocker_desc_info *desc_info,
				       void *priv)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
	const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
	const struct rocker_tlv *pattr;
	u32 pport;
	u64 *data = priv;
	int i;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);

	if (!attrs[ROCKER_TLV_CMD_INFO])
		return -EIO;

	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
				attrs[ROCKER_TLV_CMD_INFO]);

	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
		return -EIO;

	/* Sanity check: reply must carry the pport we asked about. */
	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
	if (pport != rocker_port->pport)
		return -EIO;

	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
		pattr = stats_attrs[rocker_port_stats[i].type];
		if (!pattr)
			continue;

		data[i] = rocker_tlv_get_u64(pattr);
	}

	return 0;
}
4780
/* Issue a synchronous GET_PORT_STATS command; priv points at the u64
 * stats array the reply processor fills in.
 */
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
					     void *priv)
{
	return rocker_cmd_exec(rocker_port, NULL, 0,
			       rocker_cmd_get_port_stats_prep, NULL,
			       rocker_cmd_get_port_stats_ethtool_proc,
			       priv);
}
4789
4790 static void rocker_port_get_stats(struct net_device *dev,
4791                                   struct ethtool_stats *stats, u64 *data)
4792 {
4793         struct rocker_port *rocker_port = netdev_priv(dev);
4794
4795         if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4796                 int i;
4797
4798                 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4799                         data[i] = 0;
4800         }
4801 }
4802
4803 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4804 {
4805         switch (sset) {
4806         case ETH_SS_STATS:
4807                 return ROCKER_PORT_STATS_LEN;
4808         default:
4809                 return -EOPNOTSUPP;
4810         }
4811 }
4812
/* ethtool ops exported by every rocker port netdev. */
static const struct ethtool_ops rocker_port_ethtool_ops = {
	.get_settings		= rocker_port_get_settings,
	.set_settings		= rocker_port_set_settings,
	.get_drvinfo		= rocker_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= rocker_port_get_strings,
	.get_ethtool_stats	= rocker_port_get_stats,
	.get_sset_count		= rocker_port_get_sset_count,
};
4822
4823 /*****************
4824  * NAPI interface
4825  *****************/
4826
/* Recover the owning rocker_port from its embedded tx NAPI context. */
static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_tx);
}
4831
/* NAPI poll handler for the tx ring: reclaim completed transmit
 * descriptors, unmap their DMA fragments, update stats, free the skbs
 * and hand the consumed descriptors (credits) back to the device.
 */
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	const struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Cleanup tx descriptors */
	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
		struct sk_buff *skb;

		err = rocker_desc_err(desc_info);
		if (err && net_ratelimit())
			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
				   err);
		rocker_tx_desc_frags_unmap(rocker_port, desc_info);

		skb = rocker_desc_cookie_ptr_get(desc_info);
		if (err == 0) {
			rocker_port->dev->stats.tx_packets++;
			rocker_port->dev->stats.tx_bytes += skb->len;
		} else {
			rocker_port->dev->stats.tx_errors++;
		}

		dev_kfree_skb_any(skb);
		credits++;
	}

	/* NOTE(review): presumably the xmit path stopped the queue when
	 * the ring filled; wake it now that descriptors were reclaimed.
	 */
	if (credits && netif_queue_stopped(rocker_port->dev))
		netif_wake_queue(rocker_port->dev);

	napi_complete(napi);
	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);

	return 0;
}
4870
/* Process one completed rx descriptor: parse its TLVs, unmap the DMA
 * buffer, finalize the skb (length, protocol), pass it up the stack and
 * attach a fresh skb to the descriptor for reuse.
 * Returns 0 on success or a negative errno.
 */
static int rocker_port_rx_proc(const struct rocker *rocker,
			       const struct rocker_port *rocker_port,
			       struct rocker_desc_info *desc_info)
{
	const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
	size_t rx_len;
	u16 rx_flags = 0;

	if (!skb)
		return -ENOENT;

	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
		return -EINVAL;
	if (attrs[ROCKER_TLV_RX_FLAGS])
		rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);

	rocker_dma_rx_ring_skb_unmap(rocker, attrs);

	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
	skb_put(skb, rx_len);
	skb->protocol = eth_type_trans(skb, rocker_port->dev);

	/* Device flagged the frame as already forwarded in hardware;
	 * set the offload mark so software forwarding is skipped.
	 */
	if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
		skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;

	rocker_port->dev->stats.rx_packets++;
	rocker_port->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
4905
/* Recover the owning rocker_port from its embedded rx NAPI context. */
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
{
	return container_of(napi, struct rocker_port, napi_rx);
}
4910
/* NAPI poll handler for the rx ring: process up to @budget completed
 * descriptors, recycle each back to the device, and complete NAPI only
 * when the ring drained before the budget was exhausted.
 * Returns the number of descriptors processed.
 */
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
	const struct rocker *rocker = rocker_port->rocker;
	struct rocker_desc_info *desc_info;
	u32 credits = 0;
	int err;

	/* Process rx descriptors */
	while (credits < budget &&
	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
		err = rocker_desc_err(desc_info);
		if (err) {
			if (net_ratelimit())
				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
					   err);
		} else {
			err = rocker_port_rx_proc(rocker, rocker_port,
						  desc_info);
			if (err && net_ratelimit())
				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
					   err);
		}
		if (err)
			rocker_port->dev->stats.rx_errors++;

		/* Return the descriptor to the device for reuse. */
		rocker_desc_gen_clear(desc_info);
		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
		credits++;
	}

	if (credits < budget)
		napi_complete(napi);

	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);

	return credits;
}
4949
4950 /*****************
4951  * PCI driver ops
4952  *****************/
4953
4954 static void rocker_carrier_init(const struct rocker_port *rocker_port)
4955 {
4956         const struct rocker *rocker = rocker_port->rocker;
4957         u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4958         bool link_up;
4959
4960         link_up = link_status & (1 << rocker_port->pport);
4961         if (link_up)
4962                 netif_carrier_on(rocker_port->dev);
4963         else
4964                 netif_carrier_off(rocker_port->dev);
4965 }
4966
4967 static void rocker_remove_ports(const struct rocker *rocker)
4968 {
4969         struct rocker_port *rocker_port;
4970         int i;
4971
4972         for (i = 0; i < rocker->port_count; i++) {
4973                 rocker_port = rocker->ports[i];
4974                 if (!rocker_port)
4975                         continue;
4976                 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
4977                 unregister_netdev(rocker_port->dev);
4978                 free_netdev(rocker_port->dev);
4979         }
4980         kfree(rocker->ports);
4981 }
4982
4983 static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
4984 {
4985         const struct rocker *rocker = rocker_port->rocker;
4986         const struct pci_dev *pdev = rocker->pdev;
4987         int err;
4988
4989         err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4990                                                    rocker_port->dev->dev_addr);
4991         if (err) {
4992                 dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4993                 eth_hw_addr_random(rocker_port->dev);
4994         }
4995 }
4996
/* Allocate, initialize and register the netdev for one physical port
 * (pport = port_number + 1), then program its initial hardware state:
 * learning, ingress table entry, internal VLAN, and untagged VLAN 0.
 * On failure the steps are unwound in reverse order.
 * Returns 0 or a negative errno.
 */
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;	/* pports are 1-based */
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port, NULL);

	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}
5065
5066 static int rocker_probe_ports(struct rocker *rocker)
5067 {
5068         int i;
5069         size_t alloc_size;
5070         int err;
5071
5072         alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
5073         rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
5074         if (!rocker->ports)
5075                 return -ENOMEM;
5076         for (i = 0; i < rocker->port_count; i++) {
5077                 err = rocker_probe_port(rocker, i);
5078                 if (err)
5079                         goto remove_ports;
5080         }
5081         return 0;
5082
5083 remove_ports:
5084         rocker_remove_ports(rocker);
5085         return err;
5086 }
5087
/* Allocate and enable the MSI-X vectors: one cmd, one event, plus a
 * tx/rx pair per port. The device must expose exactly the expected
 * vector count. Returns 0 or a negative errno.
 */
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}
5121
/* Disable MSI-X and free the vector table allocated by rocker_msix_init(). */
static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}
5127
5128 static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5129 {
5130         struct rocker *rocker;
5131         int err;
5132
5133         rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
5134         if (!rocker)
5135                 return -ENOMEM;
5136
5137         err = pci_enable_device(pdev);
5138         if (err) {
5139                 dev_err(&pdev->dev, "pci_enable_device failed\n");
5140                 goto err_pci_enable_device;
5141         }
5142
5143         err = pci_request_regions(pdev, rocker_driver_name);
5144         if (err) {
5145                 dev_err(&pdev->dev, "pci_request_regions failed\n");
5146                 goto err_pci_request_regions;
5147         }
5148
5149         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
5150         if (!err) {
5151                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
5152                 if (err) {
5153                         dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
5154                         goto err_pci_set_dma_mask;
5155                 }
5156         } else {
5157                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5158                 if (err) {
5159                         dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
5160                         goto err_pci_set_dma_mask;
5161                 }
5162         }
5163
5164         if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
5165                 dev_err(&pdev->dev, "invalid PCI region size\n");
5166                 err = -EINVAL;
5167                 goto err_pci_resource_len_check;
5168         }
5169
5170         rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
5171                                   pci_resource_len(pdev, 0));
5172         if (!rocker->hw_addr) {
5173                 dev_err(&pdev->dev, "ioremap failed\n");
5174                 err = -EIO;
5175                 goto err_ioremap;
5176         }
5177         pci_set_master(pdev);
5178
5179         rocker->pdev = pdev;
5180         pci_set_drvdata(pdev, rocker);
5181
5182         rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
5183
5184         err = rocker_msix_init(rocker);
5185         if (err) {
5186                 dev_err(&pdev->dev, "MSI-X init failed\n");
5187                 goto err_msix_init;
5188         }
5189
5190         err = rocker_basic_hw_test(rocker);
5191         if (err) {
5192                 dev_err(&pdev->dev, "basic hw test failed\n");
5193                 goto err_basic_hw_test;
5194         }
5195
5196         rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
5197
5198         err = rocker_dma_rings_init(rocker);
5199         if (err)
5200                 goto err_dma_rings_init;
5201
5202         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
5203                           rocker_cmd_irq_handler, 0,
5204                           rocker_driver_name, rocker);
5205         if (err) {
5206                 dev_err(&pdev->dev, "cannot assign cmd irq\n");
5207                 goto err_request_cmd_irq;
5208         }
5209
5210         err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
5211                           rocker_event_irq_handler, 0,
5212                           rocker_driver_name, rocker);
5213         if (err) {
5214                 dev_err(&pdev->dev, "cannot assign event irq\n");
5215                 goto err_request_event_irq;
5216         }
5217
5218         rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
5219
5220         err = rocker_init_tbls(rocker);
5221         if (err) {
5222                 dev_err(&pdev->dev, "cannot init rocker tables\n");
5223                 goto err_init_tbls;
5224         }
5225
5226         rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
5227         setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
5228                     (unsigned long) rocker);
5229         mod_timer(&rocker->fdb_cleanup_timer, jiffies);
5230
5231         rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
5232
5233         err = rocker_probe_ports(rocker);
5234         if (err) {
5235                 dev_err(&pdev->dev, "failed to probe ports\n");
5236                 goto err_probe_ports;
5237         }
5238
5239         dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
5240                  (int)sizeof(rocker->hw.id), &rocker->hw.id);
5241
5242         return 0;
5243
5244 err_probe_ports:
5245         del_timer_sync(&rocker->fdb_cleanup_timer);
5246         rocker_free_tbls(rocker);
5247 err_init_tbls:
5248         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
5249 err_request_event_irq:
5250         free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
5251 err_request_cmd_irq:
5252         rocker_dma_rings_fini(rocker);
5253 err_dma_rings_init:
5254 err_basic_hw_test:
5255         rocker_msix_fini(rocker);
5256 err_msix_init:
5257         iounmap(rocker->hw_addr);
5258 err_ioremap:
5259 err_pci_resource_len_check:
5260 err_pci_set_dma_mask:
5261         pci_release_regions(pdev);
5262 err_pci_request_regions:
5263         pci_disable_device(pdev);
5264 err_pci_enable_device:
5265         kfree(rocker);
5266         return err;
5267 }
5268
/* PCI remove: undo everything rocker_probe() set up, in reverse order. */
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	/* Reset the device so it stops DMA before rings are freed. */
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}
5286
/* PCI driver registration for the rocker switch device. */
static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};
5293
5294 /************************************
5295  * Net device notifier event handler
5296  ************************************/
5297
/* Identify rocker ports by their netdev_ops pointer; used to filter
 * notifier events for netdevs this driver does not own.
 */
static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}
5302
/* Called when the port is enslaved to a bridge: switch the port's
 * internal VLAN from its own ifindex-based id to the bridge's, and
 * record the bridge as master. Returns 0 or a negative errno.
 */
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	/* Re-install untagged VLAN 0 under the new internal VLAN id. */
	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}
5329
/* Called when the port leaves its bridge: restore the port's own
 * internal VLAN, clear the master, and re-enable forwarding if the
 * port is up. Returns 0 or a negative errno.
 */
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	/* Drop untagged VLAN 0 before the internal VLAN id changes. */
	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}
5358
5359
5360 static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
5361                                    struct net_device *master)
5362 {
5363         int err;
5364
5365         rocker_port->bridge_dev = master;
5366
5367         err = rocker_port_fwd_disable(rocker_port, NULL, 0);
5368         if (err)
5369                 return err;
5370         err = rocker_port_fwd_enable(rocker_port, NULL, 0);
5371
5372         return err;
5373 }
5374
/* Dispatch a "linked to master" event to the bridge or OVS handler;
 * other master types are ignored.
 */
static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	if (netif_is_bridge_master(master))
		return rocker_port_bridge_join(rocker_port, master);
	if (netif_is_ovs_master(master))
		return rocker_port_ovs_changed(rocker_port, master);
	return 0;
}
5386
5387 static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
5388 {
5389         int err = 0;
5390
5391         if (rocker_port_is_bridged(rocker_port))
5392                 err = rocker_port_bridge_leave(rocker_port);
5393         else if (rocker_port_is_ovsed(rocker_port))
5394                 err = rocker_port_ovs_changed(rocker_port, NULL);
5395         return err;
5396 }
5397
/* netdevice notifier: react to a rocker port being linked to or
 * unlinked from a master device (bridge/OVS). Failures are logged but
 * the notifier chain always continues (NOTIFY_DONE).
 */
static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		/* Only master (enslavement) relationships matter here. */
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}
5432
/* Notifier block registered for netdevice events at module init. */
static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};
5436
5437 /************************************
5438  * Net event notifier event handler
5439  ************************************/
5440
/* Reflect an ARP neighbour change into the hardware: add the entry
 * when it is valid, remove it otherwise. NOWAIT because this runs in
 * notifier (atomic) context.
 */
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}
5450
/* netevent notifier: forward IPv4 ARP neighbour updates on rocker
 * ports to the hardware neighbour table. Failures are only logged.
 */
static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		/* Only IPv4 ARP neighbours are offloaded. */
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}
5475
/* Notifier block registered for netevents (neighbour updates). */
static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};
5479
5480 /***********************
5481  * Module init and exit
5482  ***********************/
5483
/* Module init: register the notifiers first so events are handled as
 * soon as the PCI driver starts probing ports; unwind on failure.
 */
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}
5500
/* Module exit: drop the notifiers, then unregister the PCI driver
 * (which removes all devices).
 */
static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}
5507
/* Module entry points and metadata. */
module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);