GNU Linux-libre 4.9.317-gnu1
[releases.git] / drivers / net / ethernet / mellanox / mlx4 / resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49 #include "mlx4_stats.h"
50
51 #define MLX4_MAC_VALID          (1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT       2
53 #define MLX4_VF_COUNTERS_PER_PORT       1
54
55 struct mac_res {
56         struct list_head list;
57         u64 mac;
58         int ref_count;
59         u8 smac_index;
60         u8 port;
61 };
62
63 struct vlan_res {
64         struct list_head list;
65         u16 vlan;
66         int ref_count;
67         int vlan_index;
68         u8 port;
69 };
70
71 struct res_common {
72         struct list_head        list;
73         struct rb_node          node;
74         u64                     res_id;
75         int                     owner;
76         int                     state;
77         int                     from_state;
78         int                     to_state;
79         int                     removing;
80 };
81
82 enum {
83         RES_ANY_BUSY = 1
84 };
85
86 struct res_gid {
87         struct list_head        list;
88         u8                      gid[16];
89         enum mlx4_protocol      prot;
90         enum mlx4_steer_type    steer;
91         u64                     reg_id;
92 };
93
94 enum res_qp_states {
95         RES_QP_BUSY = RES_ANY_BUSY,
96
97         /* QP number was allocated */
98         RES_QP_RESERVED,
99
100         /* ICM memory for QP context was mapped */
101         RES_QP_MAPPED,
102
103         /* QP is in hw ownership */
104         RES_QP_HW
105 };
106
107 struct res_qp {
108         struct res_common       com;
109         struct res_mtt         *mtt;
110         struct res_cq          *rcq;
111         struct res_cq          *scq;
112         struct res_srq         *srq;
113         struct list_head        mcg_list;
114         spinlock_t              mcg_spl;
115         int                     local_qpn;
116         atomic_t                ref_count;
117         u32                     qpc_flags;
118         /* saved qp params before VST enforcement in order to restore on VGT */
119         u8                      sched_queue;
120         __be32                  param3;
121         u8                      vlan_control;
122         u8                      fvl_rx;
123         u8                      pri_path_fl;
124         u8                      vlan_index;
125         u8                      feup;
126 };
127
128 enum res_mtt_states {
129         RES_MTT_BUSY = RES_ANY_BUSY,
130         RES_MTT_ALLOCATED,
131 };
132
133 static inline const char *mtt_states_str(enum res_mtt_states state)
134 {
135         switch (state) {
136         case RES_MTT_BUSY: return "RES_MTT_BUSY";
137         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
138         default: return "Unknown";
139         }
140 }
141
142 struct res_mtt {
143         struct res_common       com;
144         int                     order;
145         atomic_t                ref_count;
146 };
147
148 enum res_mpt_states {
149         RES_MPT_BUSY = RES_ANY_BUSY,
150         RES_MPT_RESERVED,
151         RES_MPT_MAPPED,
152         RES_MPT_HW,
153 };
154
155 struct res_mpt {
156         struct res_common       com;
157         struct res_mtt         *mtt;
158         int                     key;
159 };
160
161 enum res_eq_states {
162         RES_EQ_BUSY = RES_ANY_BUSY,
163         RES_EQ_RESERVED,
164         RES_EQ_HW,
165 };
166
167 struct res_eq {
168         struct res_common       com;
169         struct res_mtt         *mtt;
170 };
171
172 enum res_cq_states {
173         RES_CQ_BUSY = RES_ANY_BUSY,
174         RES_CQ_ALLOCATED,
175         RES_CQ_HW,
176 };
177
178 struct res_cq {
179         struct res_common       com;
180         struct res_mtt         *mtt;
181         atomic_t                ref_count;
182 };
183
184 enum res_srq_states {
185         RES_SRQ_BUSY = RES_ANY_BUSY,
186         RES_SRQ_ALLOCATED,
187         RES_SRQ_HW,
188 };
189
190 struct res_srq {
191         struct res_common       com;
192         struct res_mtt         *mtt;
193         struct res_cq          *cq;
194         atomic_t                ref_count;
195 };
196
197 enum res_counter_states {
198         RES_COUNTER_BUSY = RES_ANY_BUSY,
199         RES_COUNTER_ALLOCATED,
200 };
201
202 struct res_counter {
203         struct res_common       com;
204         int                     port;
205 };
206
207 enum res_xrcdn_states {
208         RES_XRCD_BUSY = RES_ANY_BUSY,
209         RES_XRCD_ALLOCATED,
210 };
211
212 struct res_xrcdn {
213         struct res_common       com;
214         int                     port;
215 };
216
217 enum res_fs_rule_states {
218         RES_FS_RULE_BUSY = RES_ANY_BUSY,
219         RES_FS_RULE_ALLOCATED,
220 };
221
222 struct res_fs_rule {
223         struct res_common       com;
224         int                     qpn;
225         /* VF DMFS mbox with port flipped */
226         void                    *mirr_mbox;
227         /* > 0 --> apply mirror when getting into HA mode      */
228         /* = 0 --> un-apply mirror when getting out of HA mode */
229         u32                     mirr_mbox_size;
230         struct list_head        mirr_list;
231         u64                     mirr_rule_id;
232 };
233
234 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
235 {
236         struct rb_node *node = root->rb_node;
237
238         while (node) {
239                 struct res_common *res = container_of(node, struct res_common,
240                                                       node);
241
242                 if (res_id < res->res_id)
243                         node = node->rb_left;
244                 else if (res_id > res->res_id)
245                         node = node->rb_right;
246                 else
247                         return res;
248         }
249         return NULL;
250 }
251
252 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
253 {
254         struct rb_node **new = &(root->rb_node), *parent = NULL;
255
256         /* Figure out where to put new node */
257         while (*new) {
258                 struct res_common *this = container_of(*new, struct res_common,
259                                                        node);
260
261                 parent = *new;
262                 if (res->res_id < this->res_id)
263                         new = &((*new)->rb_left);
264                 else if (res->res_id > this->res_id)
265                         new = &((*new)->rb_right);
266                 else
267                         return -EEXIST;
268         }
269
270         /* Add new node and rebalance tree. */
271         rb_link_node(&res->node, parent, new);
272         rb_insert_color(&res->node, root);
273
274         return 0;
275 }
276
277 enum qp_transition {
278         QP_TRANS_INIT2RTR,
279         QP_TRANS_RTR2RTS,
280         QP_TRANS_RTS2RTS,
281         QP_TRANS_SQERR2RTS,
282         QP_TRANS_SQD2SQD,
283         QP_TRANS_SQD2RTS
284 };
285
286 /* For Debug uses */
287 static const char *resource_str(enum mlx4_resource rt)
288 {
289         switch (rt) {
290         case RES_QP: return "RES_QP";
291         case RES_CQ: return "RES_CQ";
292         case RES_SRQ: return "RES_SRQ";
293         case RES_MPT: return "RES_MPT";
294         case RES_MTT: return "RES_MTT";
295         case RES_MAC: return  "RES_MAC";
296         case RES_VLAN: return  "RES_VLAN";
297         case RES_EQ: return "RES_EQ";
298         case RES_COUNTER: return "RES_COUNTER";
299         case RES_FS_RULE: return "RES_FS_RULE";
300         case RES_XRCD: return "RES_XRCD";
301         default: return "Unknown resource type !!!";
302         };
303 }
304
305 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
306 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
307                                       enum mlx4_resource res_type, int count,
308                                       int port)
309 {
310         struct mlx4_priv *priv = mlx4_priv(dev);
311         struct resource_allocator *res_alloc =
312                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
313         int err = -EINVAL;
314         int allocated, free, reserved, guaranteed, from_free;
315         int from_rsvd;
316
317         if (slave > dev->persist->num_vfs)
318                 return -EINVAL;
319
320         spin_lock(&res_alloc->alloc_lock);
321         allocated = (port > 0) ?
322                 res_alloc->allocated[(port - 1) *
323                 (dev->persist->num_vfs + 1) + slave] :
324                 res_alloc->allocated[slave];
325         free = (port > 0) ? res_alloc->res_port_free[port - 1] :
326                 res_alloc->res_free;
327         reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
328                 res_alloc->res_reserved;
329         guaranteed = res_alloc->guaranteed[slave];
330
331         if (allocated + count > res_alloc->quota[slave]) {
332                 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
333                           slave, port, resource_str(res_type), count,
334                           allocated, res_alloc->quota[slave]);
335                 goto out;
336         }
337
338         if (allocated + count <= guaranteed) {
339                 err = 0;
340                 from_rsvd = count;
341         } else {
342                 /* portion may need to be obtained from free area */
343                 if (guaranteed - allocated > 0)
344                         from_free = count - (guaranteed - allocated);
345                 else
346                         from_free = count;
347
348                 from_rsvd = count - from_free;
349
350                 if (free - from_free >= reserved)
351                         err = 0;
352                 else
353                         mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
354                                   slave, port, resource_str(res_type), free,
355                                   from_free, reserved);
356         }
357
358         if (!err) {
359                 /* grant the request */
360                 if (port > 0) {
361                         res_alloc->allocated[(port - 1) *
362                         (dev->persist->num_vfs + 1) + slave] += count;
363                         res_alloc->res_port_free[port - 1] -= count;
364                         res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
365                 } else {
366                         res_alloc->allocated[slave] += count;
367                         res_alloc->res_free -= count;
368                         res_alloc->res_reserved -= from_rsvd;
369                 }
370         }
371
372 out:
373         spin_unlock(&res_alloc->alloc_lock);
374         return err;
375 }
376
377 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
378                                     enum mlx4_resource res_type, int count,
379                                     int port)
380 {
381         struct mlx4_priv *priv = mlx4_priv(dev);
382         struct resource_allocator *res_alloc =
383                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
384         int allocated, guaranteed, from_rsvd;
385
386         if (slave > dev->persist->num_vfs)
387                 return;
388
389         spin_lock(&res_alloc->alloc_lock);
390
391         allocated = (port > 0) ?
392                 res_alloc->allocated[(port - 1) *
393                 (dev->persist->num_vfs + 1) + slave] :
394                 res_alloc->allocated[slave];
395         guaranteed = res_alloc->guaranteed[slave];
396
397         if (allocated - count >= guaranteed) {
398                 from_rsvd = 0;
399         } else {
400                 /* portion may need to be returned to reserved area */
401                 if (allocated - guaranteed > 0)
402                         from_rsvd = count - (allocated - guaranteed);
403                 else
404                         from_rsvd = count;
405         }
406
407         if (port > 0) {
408                 res_alloc->allocated[(port - 1) *
409                 (dev->persist->num_vfs + 1) + slave] -= count;
410                 res_alloc->res_port_free[port - 1] += count;
411                 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
412         } else {
413                 res_alloc->allocated[slave] -= count;
414                 res_alloc->res_free += count;
415                 res_alloc->res_reserved += from_rsvd;
416         }
417
418         spin_unlock(&res_alloc->alloc_lock);
419         return;
420 }
421
422 static inline void initialize_res_quotas(struct mlx4_dev *dev,
423                                          struct resource_allocator *res_alloc,
424                                          enum mlx4_resource res_type,
425                                          int vf, int num_instances)
426 {
427         res_alloc->guaranteed[vf] = num_instances /
428                                     (2 * (dev->persist->num_vfs + 1));
429         res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
430         if (vf == mlx4_master_func_num(dev)) {
431                 res_alloc->res_free = num_instances;
432                 if (res_type == RES_MTT) {
433                         /* reserved mtts will be taken out of the PF allocation */
434                         res_alloc->res_free += dev->caps.reserved_mtts;
435                         res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
436                         res_alloc->quota[vf] += dev->caps.reserved_mtts;
437                 }
438         }
439 }
440
441 void mlx4_init_quotas(struct mlx4_dev *dev)
442 {
443         struct mlx4_priv *priv = mlx4_priv(dev);
444         int pf;
445
446         /* quotas for VFs are initialized in mlx4_slave_cap */
447         if (mlx4_is_slave(dev))
448                 return;
449
450         if (!mlx4_is_mfunc(dev)) {
451                 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
452                         mlx4_num_reserved_sqps(dev);
453                 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
454                 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
455                 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
456                 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
457                 return;
458         }
459
460         pf = mlx4_master_func_num(dev);
461         dev->quotas.qp =
462                 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
463         dev->quotas.cq =
464                 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
465         dev->quotas.srq =
466                 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
467         dev->quotas.mtt =
468                 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
469         dev->quotas.mpt =
470                 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
471 }
472
473 static int
474 mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
475                                  struct resource_allocator *res_alloc,
476                                  int vf)
477 {
478         struct mlx4_active_ports actv_ports;
479         int ports, counters_guaranteed;
480
481         /* For master, only allocate according to the number of phys ports */
482         if (vf == mlx4_master_func_num(dev))
483                 return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
484
485         /* calculate real number of ports for the VF */
486         actv_ports = mlx4_get_active_ports(dev, vf);
487         ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
488         counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
489
490         /* If we do not have enough counters for this VF, do not
491          * allocate any for it. '-1' to reduce the sink counter.
492          */
493         if ((res_alloc->res_reserved + counters_guaranteed) >
494             (dev->caps.max_counters - 1))
495                 return 0;
496
497         return counters_guaranteed;
498 }
499
500 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
501 {
502         struct mlx4_priv *priv = mlx4_priv(dev);
503         int i, j;
504         int t;
505
506         priv->mfunc.master.res_tracker.slave_list =
507                 kzalloc(dev->num_slaves * sizeof(struct slave_list),
508                         GFP_KERNEL);
509         if (!priv->mfunc.master.res_tracker.slave_list)
510                 return -ENOMEM;
511
512         for (i = 0 ; i < dev->num_slaves; i++) {
513                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
514                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
515                                        slave_list[i].res_list[t]);
516                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
517         }
518
519         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
520                  dev->num_slaves);
521         for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
522                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
523
524         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
525                 struct resource_allocator *res_alloc =
526                         &priv->mfunc.master.res_tracker.res_alloc[i];
527                 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
528                                            sizeof(int), GFP_KERNEL);
529                 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
530                                                 sizeof(int), GFP_KERNEL);
531                 if (i == RES_MAC || i == RES_VLAN)
532                         res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
533                                                        (dev->persist->num_vfs
534                                                        + 1) *
535                                                        sizeof(int), GFP_KERNEL);
536                 else
537                         res_alloc->allocated = kzalloc((dev->persist->
538                                                         num_vfs + 1) *
539                                                        sizeof(int), GFP_KERNEL);
540                 /* Reduce the sink counter */
541                 if (i == RES_COUNTER)
542                         res_alloc->res_free = dev->caps.max_counters - 1;
543
544                 if (!res_alloc->quota || !res_alloc->guaranteed ||
545                     !res_alloc->allocated)
546                         goto no_mem_err;
547
548                 spin_lock_init(&res_alloc->alloc_lock);
549                 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
550                         struct mlx4_active_ports actv_ports =
551                                 mlx4_get_active_ports(dev, t);
552                         switch (i) {
553                         case RES_QP:
554                                 initialize_res_quotas(dev, res_alloc, RES_QP,
555                                                       t, dev->caps.num_qps -
556                                                       dev->caps.reserved_qps -
557                                                       mlx4_num_reserved_sqps(dev));
558                                 break;
559                         case RES_CQ:
560                                 initialize_res_quotas(dev, res_alloc, RES_CQ,
561                                                       t, dev->caps.num_cqs -
562                                                       dev->caps.reserved_cqs);
563                                 break;
564                         case RES_SRQ:
565                                 initialize_res_quotas(dev, res_alloc, RES_SRQ,
566                                                       t, dev->caps.num_srqs -
567                                                       dev->caps.reserved_srqs);
568                                 break;
569                         case RES_MPT:
570                                 initialize_res_quotas(dev, res_alloc, RES_MPT,
571                                                       t, dev->caps.num_mpts -
572                                                       dev->caps.reserved_mrws);
573                                 break;
574                         case RES_MTT:
575                                 initialize_res_quotas(dev, res_alloc, RES_MTT,
576                                                       t, dev->caps.num_mtts -
577                                                       dev->caps.reserved_mtts);
578                                 break;
579                         case RES_MAC:
580                                 if (t == mlx4_master_func_num(dev)) {
581                                         int max_vfs_pport = 0;
582                                         /* Calculate the max vfs per port for */
583                                         /* both ports.                        */
584                                         for (j = 0; j < dev->caps.num_ports;
585                                              j++) {
586                                                 struct mlx4_slaves_pport slaves_pport =
587                                                         mlx4_phys_to_slaves_pport(dev, j + 1);
588                                                 unsigned current_slaves =
589                                                         bitmap_weight(slaves_pport.slaves,
590                                                                       dev->caps.num_ports) - 1;
591                                                 if (max_vfs_pport < current_slaves)
592                                                         max_vfs_pport =
593                                                                 current_slaves;
594                                         }
595                                         res_alloc->quota[t] =
596                                                 MLX4_MAX_MAC_NUM -
597                                                 2 * max_vfs_pport;
598                                         res_alloc->guaranteed[t] = 2;
599                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
600                                                 res_alloc->res_port_free[j] =
601                                                         MLX4_MAX_MAC_NUM;
602                                 } else {
603                                         res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
604                                         res_alloc->guaranteed[t] = 2;
605                                 }
606                                 break;
607                         case RES_VLAN:
608                                 if (t == mlx4_master_func_num(dev)) {
609                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
610                                         res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
611                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
612                                                 res_alloc->res_port_free[j] =
613                                                         res_alloc->quota[t];
614                                 } else {
615                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
616                                         res_alloc->guaranteed[t] = 0;
617                                 }
618                                 break;
619                         case RES_COUNTER:
620                                 res_alloc->quota[t] = dev->caps.max_counters;
621                                 res_alloc->guaranteed[t] =
622                                         mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
623                                 res_alloc->res_free -= res_alloc->guaranteed[t];
624                                 break;
625                         default:
626                                 break;
627                         }
628                         if (i == RES_MAC || i == RES_VLAN) {
629                                 for (j = 0; j < dev->caps.num_ports; j++)
630                                         if (test_bit(j, actv_ports.ports))
631                                                 res_alloc->res_port_rsvd[j] +=
632                                                         res_alloc->guaranteed[t];
633                         } else {
634                                 res_alloc->res_reserved += res_alloc->guaranteed[t];
635                         }
636                 }
637         }
638         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
639         return 0;
640
641 no_mem_err:
642         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
643                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
644                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
645                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
646                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
647                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
648                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
649         }
650         return -ENOMEM;
651 }
652
653 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
654                                 enum mlx4_res_tracker_free_type type)
655 {
656         struct mlx4_priv *priv = mlx4_priv(dev);
657         int i;
658
659         if (priv->mfunc.master.res_tracker.slave_list) {
660                 if (type != RES_TR_FREE_STRUCTS_ONLY) {
661                         for (i = 0; i < dev->num_slaves; i++) {
662                                 if (type == RES_TR_FREE_ALL ||
663                                     dev->caps.function != i)
664                                         mlx4_delete_all_resources_for_slave(dev, i);
665                         }
666                         /* free master's vlans */
667                         i = dev->caps.function;
668                         mlx4_reset_roce_gids(dev, i);
669                         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
670                         rem_slave_vlans(dev, i);
671                         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
672                 }
673
674                 if (type != RES_TR_FREE_SLAVES_ONLY) {
675                         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
676                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
677                                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
678                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
679                                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
680                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
681                                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
682                         }
683                         kfree(priv->mfunc.master.res_tracker.slave_list);
684                         priv->mfunc.master.res_tracker.slave_list = NULL;
685                 }
686         }
687 }
688
689 static void update_pkey_index(struct mlx4_dev *dev, int slave,
690                               struct mlx4_cmd_mailbox *inbox)
691 {
692         u8 sched = *(u8 *)(inbox->buf + 64);
693         u8 orig_index = *(u8 *)(inbox->buf + 35);
694         u8 new_index;
695         struct mlx4_priv *priv = mlx4_priv(dev);
696         int port;
697
698         port = (sched >> 6 & 1) + 1;
699
700         new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
701         *(u8 *)(inbox->buf + 35) = new_index;
702 }
703
704 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
705                        u8 slave)
706 {
707         struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
708         enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
709         u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
710         int port;
711
712         if (MLX4_QP_ST_UD == ts) {
713                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
714                 if (mlx4_is_eth(dev, port))
715                         qp_ctx->pri_path.mgid_index =
716                                 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
717                 else
718                         qp_ctx->pri_path.mgid_index = slave | 0x80;
719
720         } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
721                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
722                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
723                         if (mlx4_is_eth(dev, port)) {
724                                 qp_ctx->pri_path.mgid_index +=
725                                         mlx4_get_base_gid_ix(dev, slave, port);
726                                 qp_ctx->pri_path.mgid_index &= 0x7f;
727                         } else {
728                                 qp_ctx->pri_path.mgid_index = slave & 0x7F;
729                         }
730                 }
731                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
732                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
733                         if (mlx4_is_eth(dev, port)) {
734                                 qp_ctx->alt_path.mgid_index +=
735                                         mlx4_get_base_gid_ix(dev, slave, port);
736                                 qp_ctx->alt_path.mgid_index &= 0x7f;
737                         } else {
738                                 qp_ctx->alt_path.mgid_index = slave & 0x7F;
739                         }
740                 }
741         }
742 }
743
744 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
745                           u8 slave, int port);
746
747 static int update_vport_qp_param(struct mlx4_dev *dev,
748                                  struct mlx4_cmd_mailbox *inbox,
749                                  u8 slave, u32 qpn)
750 {
751         struct mlx4_qp_context  *qpc = inbox->buf + 8;
752         struct mlx4_vport_oper_state *vp_oper;
753         struct mlx4_priv *priv;
754         u32 qp_type;
755         int port, err = 0;
756
757         port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
758         priv = mlx4_priv(dev);
759         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
760         qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
761
762         err = handle_counter(dev, qpc, slave, port);
763         if (err)
764                 goto out;
765
766         if (MLX4_VGT != vp_oper->state.default_vlan) {
767                 /* the reserved QPs (special, proxy, tunnel)
768                  * do not operate over vlans
769                  */
770                 if (mlx4_is_qp_reserved(dev, qpn))
771                         return 0;
772
773                 /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
774                 if (qp_type == MLX4_QP_ST_UD ||
775                     (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
776                         if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
777                                 *(__be32 *)inbox->buf =
778                                         cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
779                                         MLX4_QP_OPTPAR_VLAN_STRIPPING);
780                                 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
781                         } else {
782                                 struct mlx4_update_qp_params params = {.flags = 0};
783
784                                 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
785                                 if (err)
786                                         goto out;
787                         }
788                 }
789
790                 /* preserve IF_COUNTER flag */
791                 qpc->pri_path.vlan_control &=
792                         MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
793                 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
794                     dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
795                         qpc->pri_path.vlan_control |=
796                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
797                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
798                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
799                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
800                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
801                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
802                 } else if (0 != vp_oper->state.default_vlan) {
803                         if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
804                                 /* vst QinQ should block untagged on TX,
805                                  * but cvlan is in payload and phv is set so
806                                  * hw see it as untagged. Block tagged instead.
807                                  */
808                                 qpc->pri_path.vlan_control |=
809                                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
810                                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
811                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
812                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
813                         } else { /* vst 802.1Q */
814                                 qpc->pri_path.vlan_control |=
815                                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
816                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
817                                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
818                         }
819                 } else { /* priority tagged */
820                         qpc->pri_path.vlan_control |=
821                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
822                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
823                 }
824
825                 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
826                 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
827                 qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
828                 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
829                         qpc->pri_path.fl |= MLX4_FL_SV;
830                 else
831                         qpc->pri_path.fl |= MLX4_FL_CV;
832                 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
833                 qpc->pri_path.sched_queue &= 0xC7;
834                 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
835                 qpc->qos_vport = vp_oper->state.qos_vport;
836         }
837         if (vp_oper->state.spoofchk) {
838                 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
839                 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
840         }
841 out:
842         return err;
843 }
844
845 static int mpt_mask(struct mlx4_dev *dev)
846 {
847         return dev->caps.num_mpts - 1;
848 }
849
850 static void *find_res(struct mlx4_dev *dev, u64 res_id,
851                       enum mlx4_resource type)
852 {
853         struct mlx4_priv *priv = mlx4_priv(dev);
854
855         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
856                                   res_id);
857 }
858
859 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
860                    enum mlx4_resource type,
861                    void *res)
862 {
863         struct res_common *r;
864         int err = 0;
865
866         spin_lock_irq(mlx4_tlock(dev));
867         r = find_res(dev, res_id, type);
868         if (!r) {
869                 err = -ENONET;
870                 goto exit;
871         }
872
873         if (r->state == RES_ANY_BUSY) {
874                 err = -EBUSY;
875                 goto exit;
876         }
877
878         if (r->owner != slave) {
879                 err = -EPERM;
880                 goto exit;
881         }
882
883         r->from_state = r->state;
884         r->state = RES_ANY_BUSY;
885
886         if (res)
887                 *((struct res_common **)res) = r;
888
889 exit:
890         spin_unlock_irq(mlx4_tlock(dev));
891         return err;
892 }
893
894 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
895                                     enum mlx4_resource type,
896                                     u64 res_id, int *slave)
897 {
898
899         struct res_common *r;
900         int err = -ENOENT;
901         int id = res_id;
902
903         if (type == RES_QP)
904                 id &= 0x7fffff;
905         spin_lock(mlx4_tlock(dev));
906
907         r = find_res(dev, id, type);
908         if (r) {
909                 *slave = r->owner;
910                 err = 0;
911         }
912         spin_unlock(mlx4_tlock(dev));
913
914         return err;
915 }
916
917 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
918                     enum mlx4_resource type)
919 {
920         struct res_common *r;
921
922         spin_lock_irq(mlx4_tlock(dev));
923         r = find_res(dev, res_id, type);
924         if (r)
925                 r->state = r->from_state;
926         spin_unlock_irq(mlx4_tlock(dev));
927 }
928
929 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
930                              u64 in_param, u64 *out_param, int port);
931
932 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
933                                    int counter_index)
934 {
935         struct res_common *r;
936         struct res_counter *counter;
937         int ret = 0;
938
939         if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
940                 return ret;
941
942         spin_lock_irq(mlx4_tlock(dev));
943         r = find_res(dev, counter_index, RES_COUNTER);
944         if (!r || r->owner != slave) {
945                 ret = -EINVAL;
946         } else {
947                 counter = container_of(r, struct res_counter, com);
948                 if (!counter->port)
949                         counter->port = port;
950         }
951
952         spin_unlock_irq(mlx4_tlock(dev));
953         return ret;
954 }
955
956 static int handle_unexisting_counter(struct mlx4_dev *dev,
957                                      struct mlx4_qp_context *qpc, u8 slave,
958                                      int port)
959 {
960         struct mlx4_priv *priv = mlx4_priv(dev);
961         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
962         struct res_common *tmp;
963         struct res_counter *counter;
964         u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
965         int err = 0;
966
967         spin_lock_irq(mlx4_tlock(dev));
968         list_for_each_entry(tmp,
969                             &tracker->slave_list[slave].res_list[RES_COUNTER],
970                             list) {
971                 counter = container_of(tmp, struct res_counter, com);
972                 if (port == counter->port) {
973                         qpc->pri_path.counter_index  = counter->com.res_id;
974                         spin_unlock_irq(mlx4_tlock(dev));
975                         return 0;
976                 }
977         }
978         spin_unlock_irq(mlx4_tlock(dev));
979
980         /* No existing counter, need to allocate a new counter */
981         err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
982                                 port);
983         if (err == -ENOENT) {
984                 err = 0;
985         } else if (err && err != -ENOSPC) {
986                 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
987                          __func__, slave, err);
988         } else {
989                 qpc->pri_path.counter_index = counter_idx;
990                 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
991                          __func__, slave, qpc->pri_path.counter_index);
992                 err = 0;
993         }
994
995         return err;
996 }
997
998 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
999                           u8 slave, int port)
1000 {
1001         if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
1002                 return handle_existing_counter(dev, slave, port,
1003                                                qpc->pri_path.counter_index);
1004
1005         return handle_unexisting_counter(dev, qpc, slave, port);
1006 }
1007
1008 static struct res_common *alloc_qp_tr(int id)
1009 {
1010         struct res_qp *ret;
1011
1012         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1013         if (!ret)
1014                 return NULL;
1015
1016         ret->com.res_id = id;
1017         ret->com.state = RES_QP_RESERVED;
1018         ret->local_qpn = id;
1019         INIT_LIST_HEAD(&ret->mcg_list);
1020         spin_lock_init(&ret->mcg_spl);
1021         atomic_set(&ret->ref_count, 0);
1022
1023         return &ret->com;
1024 }
1025
1026 static struct res_common *alloc_mtt_tr(int id, int order)
1027 {
1028         struct res_mtt *ret;
1029
1030         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1031         if (!ret)
1032                 return NULL;
1033
1034         ret->com.res_id = id;
1035         ret->order = order;
1036         ret->com.state = RES_MTT_ALLOCATED;
1037         atomic_set(&ret->ref_count, 0);
1038
1039         return &ret->com;
1040 }
1041
1042 static struct res_common *alloc_mpt_tr(int id, int key)
1043 {
1044         struct res_mpt *ret;
1045
1046         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1047         if (!ret)
1048                 return NULL;
1049
1050         ret->com.res_id = id;
1051         ret->com.state = RES_MPT_RESERVED;
1052         ret->key = key;
1053
1054         return &ret->com;
1055 }
1056
1057 static struct res_common *alloc_eq_tr(int id)
1058 {
1059         struct res_eq *ret;
1060
1061         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1062         if (!ret)
1063                 return NULL;
1064
1065         ret->com.res_id = id;
1066         ret->com.state = RES_EQ_RESERVED;
1067
1068         return &ret->com;
1069 }
1070
1071 static struct res_common *alloc_cq_tr(int id)
1072 {
1073         struct res_cq *ret;
1074
1075         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1076         if (!ret)
1077                 return NULL;
1078
1079         ret->com.res_id = id;
1080         ret->com.state = RES_CQ_ALLOCATED;
1081         atomic_set(&ret->ref_count, 0);
1082
1083         return &ret->com;
1084 }
1085
1086 static struct res_common *alloc_srq_tr(int id)
1087 {
1088         struct res_srq *ret;
1089
1090         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1091         if (!ret)
1092                 return NULL;
1093
1094         ret->com.res_id = id;
1095         ret->com.state = RES_SRQ_ALLOCATED;
1096         atomic_set(&ret->ref_count, 0);
1097
1098         return &ret->com;
1099 }
1100
1101 static struct res_common *alloc_counter_tr(int id, int port)
1102 {
1103         struct res_counter *ret;
1104
1105         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1106         if (!ret)
1107                 return NULL;
1108
1109         ret->com.res_id = id;
1110         ret->com.state = RES_COUNTER_ALLOCATED;
1111         ret->port = port;
1112
1113         return &ret->com;
1114 }
1115
1116 static struct res_common *alloc_xrcdn_tr(int id)
1117 {
1118         struct res_xrcdn *ret;
1119
1120         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1121         if (!ret)
1122                 return NULL;
1123
1124         ret->com.res_id = id;
1125         ret->com.state = RES_XRCD_ALLOCATED;
1126
1127         return &ret->com;
1128 }
1129
1130 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1131 {
1132         struct res_fs_rule *ret;
1133
1134         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1135         if (!ret)
1136                 return NULL;
1137
1138         ret->com.res_id = id;
1139         ret->com.state = RES_FS_RULE_ALLOCATED;
1140         ret->qpn = qpn;
1141         return &ret->com;
1142 }
1143
1144 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1145                                    int extra)
1146 {
1147         struct res_common *ret;
1148
1149         switch (type) {
1150         case RES_QP:
1151                 ret = alloc_qp_tr(id);
1152                 break;
1153         case RES_MPT:
1154                 ret = alloc_mpt_tr(id, extra);
1155                 break;
1156         case RES_MTT:
1157                 ret = alloc_mtt_tr(id, extra);
1158                 break;
1159         case RES_EQ:
1160                 ret = alloc_eq_tr(id);
1161                 break;
1162         case RES_CQ:
1163                 ret = alloc_cq_tr(id);
1164                 break;
1165         case RES_SRQ:
1166                 ret = alloc_srq_tr(id);
1167                 break;
1168         case RES_MAC:
1169                 pr_err("implementation missing\n");
1170                 return NULL;
1171         case RES_COUNTER:
1172                 ret = alloc_counter_tr(id, extra);
1173                 break;
1174         case RES_XRCD:
1175                 ret = alloc_xrcdn_tr(id);
1176                 break;
1177         case RES_FS_RULE:
1178                 ret = alloc_fs_rule_tr(id, extra);
1179                 break;
1180         default:
1181                 return NULL;
1182         }
1183         if (ret)
1184                 ret->owner = slave;
1185
1186         return ret;
1187 }
1188
1189 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1190                           struct mlx4_counter *data)
1191 {
1192         struct mlx4_priv *priv = mlx4_priv(dev);
1193         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1194         struct res_common *tmp;
1195         struct res_counter *counter;
1196         int *counters_arr;
1197         int i = 0, err = 0;
1198
1199         memset(data, 0, sizeof(*data));
1200
1201         counters_arr = kmalloc_array(dev->caps.max_counters,
1202                                      sizeof(*counters_arr), GFP_KERNEL);
1203         if (!counters_arr)
1204                 return -ENOMEM;
1205
1206         spin_lock_irq(mlx4_tlock(dev));
1207         list_for_each_entry(tmp,
1208                             &tracker->slave_list[slave].res_list[RES_COUNTER],
1209                             list) {
1210                 counter = container_of(tmp, struct res_counter, com);
1211                 if (counter->port == port) {
1212                         counters_arr[i] = (int)tmp->res_id;
1213                         i++;
1214                 }
1215         }
1216         spin_unlock_irq(mlx4_tlock(dev));
1217         counters_arr[i] = -1;
1218
1219         i = 0;
1220
1221         while (counters_arr[i] != -1) {
1222                 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1223                                              0);
1224                 if (err) {
1225                         memset(data, 0, sizeof(*data));
1226                         goto table_changed;
1227                 }
1228                 i++;
1229         }
1230
1231 table_changed:
1232         kfree(counters_arr);
1233         return 0;
1234 }
1235
1236 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1237                          enum mlx4_resource type, int extra)
1238 {
1239         int i;
1240         int err;
1241         struct mlx4_priv *priv = mlx4_priv(dev);
1242         struct res_common **res_arr;
1243         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1244         struct rb_root *root = &tracker->res_tree[type];
1245
1246         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1247         if (!res_arr)
1248                 return -ENOMEM;
1249
1250         for (i = 0; i < count; ++i) {
1251                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1252                 if (!res_arr[i]) {
1253                         for (--i; i >= 0; --i)
1254                                 kfree(res_arr[i]);
1255
1256                         kfree(res_arr);
1257                         return -ENOMEM;
1258                 }
1259         }
1260
1261         spin_lock_irq(mlx4_tlock(dev));
1262         for (i = 0; i < count; ++i) {
1263                 if (find_res(dev, base + i, type)) {
1264                         err = -EEXIST;
1265                         goto undo;
1266                 }
1267                 err = res_tracker_insert(root, res_arr[i]);
1268                 if (err)
1269                         goto undo;
1270                 list_add_tail(&res_arr[i]->list,
1271                               &tracker->slave_list[slave].res_list[type]);
1272         }
1273         spin_unlock_irq(mlx4_tlock(dev));
1274         kfree(res_arr);
1275
1276         return 0;
1277
1278 undo:
1279         for (--i; i >= 0; --i) {
1280                 rb_erase(&res_arr[i]->node, root);
1281                 list_del_init(&res_arr[i]->list);
1282         }
1283
1284         spin_unlock_irq(mlx4_tlock(dev));
1285
1286         for (i = 0; i < count; ++i)
1287                 kfree(res_arr[i]);
1288
1289         kfree(res_arr);
1290
1291         return err;
1292 }
1293
1294 static int remove_qp_ok(struct res_qp *res)
1295 {
1296         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1297             !list_empty(&res->mcg_list)) {
1298                 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1299                        res->com.state, atomic_read(&res->ref_count));
1300                 return -EBUSY;
1301         } else if (res->com.state != RES_QP_RESERVED) {
1302                 return -EPERM;
1303         }
1304
1305         return 0;
1306 }
1307
1308 static int remove_mtt_ok(struct res_mtt *res, int order)
1309 {
1310         if (res->com.state == RES_MTT_BUSY ||
1311             atomic_read(&res->ref_count)) {
1312                 pr_devel("%s-%d: state %s, ref_count %d\n",
1313                          __func__, __LINE__,
1314                          mtt_states_str(res->com.state),
1315                          atomic_read(&res->ref_count));
1316                 return -EBUSY;
1317         } else if (res->com.state != RES_MTT_ALLOCATED)
1318                 return -EPERM;
1319         else if (res->order != order)
1320                 return -EINVAL;
1321
1322         return 0;
1323 }
1324
1325 static int remove_mpt_ok(struct res_mpt *res)
1326 {
1327         if (res->com.state == RES_MPT_BUSY)
1328                 return -EBUSY;
1329         else if (res->com.state != RES_MPT_RESERVED)
1330                 return -EPERM;
1331
1332         return 0;
1333 }
1334
1335 static int remove_eq_ok(struct res_eq *res)
1336 {
1337         if (res->com.state == RES_MPT_BUSY)
1338                 return -EBUSY;
1339         else if (res->com.state != RES_MPT_RESERVED)
1340                 return -EPERM;
1341
1342         return 0;
1343 }
1344
1345 static int remove_counter_ok(struct res_counter *res)
1346 {
1347         if (res->com.state == RES_COUNTER_BUSY)
1348                 return -EBUSY;
1349         else if (res->com.state != RES_COUNTER_ALLOCATED)
1350                 return -EPERM;
1351
1352         return 0;
1353 }
1354
1355 static int remove_xrcdn_ok(struct res_xrcdn *res)
1356 {
1357         if (res->com.state == RES_XRCD_BUSY)
1358                 return -EBUSY;
1359         else if (res->com.state != RES_XRCD_ALLOCATED)
1360                 return -EPERM;
1361
1362         return 0;
1363 }
1364
1365 static int remove_fs_rule_ok(struct res_fs_rule *res)
1366 {
1367         if (res->com.state == RES_FS_RULE_BUSY)
1368                 return -EBUSY;
1369         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1370                 return -EPERM;
1371
1372         return 0;
1373 }
1374
1375 static int remove_cq_ok(struct res_cq *res)
1376 {
1377         if (res->com.state == RES_CQ_BUSY)
1378                 return -EBUSY;
1379         else if (res->com.state != RES_CQ_ALLOCATED)
1380                 return -EPERM;
1381
1382         return 0;
1383 }
1384
1385 static int remove_srq_ok(struct res_srq *res)
1386 {
1387         if (res->com.state == RES_SRQ_BUSY)
1388                 return -EBUSY;
1389         else if (res->com.state != RES_SRQ_ALLOCATED)
1390                 return -EPERM;
1391
1392         return 0;
1393 }
1394
1395 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1396 {
1397         switch (type) {
1398         case RES_QP:
1399                 return remove_qp_ok((struct res_qp *)res);
1400         case RES_CQ:
1401                 return remove_cq_ok((struct res_cq *)res);
1402         case RES_SRQ:
1403                 return remove_srq_ok((struct res_srq *)res);
1404         case RES_MPT:
1405                 return remove_mpt_ok((struct res_mpt *)res);
1406         case RES_MTT:
1407                 return remove_mtt_ok((struct res_mtt *)res, extra);
1408         case RES_MAC:
1409                 return -ENOSYS;
1410         case RES_EQ:
1411                 return remove_eq_ok((struct res_eq *)res);
1412         case RES_COUNTER:
1413                 return remove_counter_ok((struct res_counter *)res);
1414         case RES_XRCD:
1415                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1416         case RES_FS_RULE:
1417                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1418         default:
1419                 return -EINVAL;
1420         }
1421 }
1422
1423 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1424                          enum mlx4_resource type, int extra)
1425 {
1426         u64 i;
1427         int err;
1428         struct mlx4_priv *priv = mlx4_priv(dev);
1429         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1430         struct res_common *r;
1431
1432         spin_lock_irq(mlx4_tlock(dev));
1433         for (i = base; i < base + count; ++i) {
1434                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1435                 if (!r) {
1436                         err = -ENOENT;
1437                         goto out;
1438                 }
1439                 if (r->owner != slave) {
1440                         err = -EPERM;
1441                         goto out;
1442                 }
1443                 err = remove_ok(r, type, extra);
1444                 if (err)
1445                         goto out;
1446         }
1447
1448         for (i = base; i < base + count; ++i) {
1449                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1450                 rb_erase(&r->node, &tracker->res_tree[type]);
1451                 list_del(&r->list);
1452                 kfree(r);
1453         }
1454         err = 0;
1455
1456 out:
1457         spin_unlock_irq(mlx4_tlock(dev));
1458
1459         return err;
1460 }
1461
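/* Begin a QP state transition: validate the requested move, record the
 * from/to states and park the resource in RES_QP_BUSY.  res_end_move()
 * commits the transition and res_abort_move() rolls it back.  The same
 * pattern is used for MPTs, EQs, CQs and SRQs below.
 */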
1462 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1463                                 enum res_qp_states state, struct res_qp **qp,
1464                                 int alloc)
1465 {
1466         struct mlx4_priv *priv = mlx4_priv(dev);
1467         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1468         struct res_qp *r;
1469         int err = 0;
1470
1471         spin_lock_irq(mlx4_tlock(dev));
1472         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1473         if (!r)
1474                 err = -ENOENT;
1475         else if (r->com.owner != slave)
1476                 err = -EPERM;
1477         else {
1478                 switch (state) {
1479                 case RES_QP_BUSY:
1480                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1481                                  __func__, r->com.res_id);
1482                         err = -EBUSY;
1483                         break;
1484
1485                 case RES_QP_RESERVED:
1486                         if (r->com.state == RES_QP_MAPPED && !alloc)
1487                                 break;
1488
1489                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1490                         err = -EINVAL;
1491                         break;
1492
1493                 case RES_QP_MAPPED:
1494                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1495                             r->com.state == RES_QP_HW)
1496                                 break;
1497                         else {
1498                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1499                                           r->com.res_id);
1500                                 err = -EINVAL;
1501                         }
1502
1503                         break;
1504
1505                 case RES_QP_HW:
1506                         if (r->com.state != RES_QP_MAPPED)
1507                                 err = -EINVAL;
1508                         break;
1509                 default:
1510                         err = -EINVAL;
1511                 }
1512
1513                 if (!err) {
1514                         r->com.from_state = r->com.state;
1515                         r->com.to_state = state;
1516                         r->com.state = RES_QP_BUSY;
1517                         if (qp)
1518                                 *qp = r;
1519                 }
1520         }
1521
1522         spin_unlock_irq(mlx4_tlock(dev));
1523
1524         return err;
1525 }
1526
1527 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1528                                 enum res_mpt_states state, struct res_mpt **mpt)
1529 {
1530         struct mlx4_priv *priv = mlx4_priv(dev);
1531         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1532         struct res_mpt *r;
1533         int err = 0;
1534
1535         spin_lock_irq(mlx4_tlock(dev));
1536         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1537         if (!r)
1538                 err = -ENOENT;
1539         else if (r->com.owner != slave)
1540                 err = -EPERM;
1541         else {
1542                 switch (state) {
1543                 case RES_MPT_BUSY:
1544                         err = -EINVAL;
1545                         break;
1546
1547                 case RES_MPT_RESERVED:
1548                         if (r->com.state != RES_MPT_MAPPED)
1549                                 err = -EINVAL;
1550                         break;
1551
1552                 case RES_MPT_MAPPED:
1553                         if (r->com.state != RES_MPT_RESERVED &&
1554                             r->com.state != RES_MPT_HW)
1555                                 err = -EINVAL;
1556                         break;
1557
1558                 case RES_MPT_HW:
1559                         if (r->com.state != RES_MPT_MAPPED)
1560                                 err = -EINVAL;
1561                         break;
1562                 default:
1563                         err = -EINVAL;
1564                 }
1565
1566                 if (!err) {
1567                         r->com.from_state = r->com.state;
1568                         r->com.to_state = state;
1569                         r->com.state = RES_MPT_BUSY;
1570                         if (mpt)
1571                                 *mpt = r;
1572                 }
1573         }
1574
1575         spin_unlock_irq(mlx4_tlock(dev));
1576
1577         return err;
1578 }
1579
1580 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1581                                 enum res_eq_states state, struct res_eq **eq)
1582 {
1583         struct mlx4_priv *priv = mlx4_priv(dev);
1584         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1585         struct res_eq *r;
1586         int err = 0;
1587
1588         spin_lock_irq(mlx4_tlock(dev));
1589         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1590         if (!r)
1591                 err = -ENOENT;
1592         else if (r->com.owner != slave)
1593                 err = -EPERM;
1594         else {
1595                 switch (state) {
1596                 case RES_EQ_BUSY:
1597                         err = -EINVAL;
1598                         break;
1599
1600                 case RES_EQ_RESERVED:
1601                         if (r->com.state != RES_EQ_HW)
1602                                 err = -EINVAL;
1603                         break;
1604
1605                 case RES_EQ_HW:
1606                         if (r->com.state != RES_EQ_RESERVED)
1607                                 err = -EINVAL;
1608                         break;
1609
1610                 default:
1611                         err = -EINVAL;
1612                 }
1613
1614                 if (!err) {
1615                         r->com.from_state = r->com.state;
1616                         r->com.to_state = state;
1617                         r->com.state = RES_EQ_BUSY;
1618                 }
1619         }
1620
1621         spin_unlock_irq(mlx4_tlock(dev));
1622
1623         if (!err && eq)
1624                 *eq = r;
1625
1626         return err;
1627 }
1628
1629 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1630                                 enum res_cq_states state, struct res_cq **cq)
1631 {
1632         struct mlx4_priv *priv = mlx4_priv(dev);
1633         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1634         struct res_cq *r;
1635         int err;
1636
1637         spin_lock_irq(mlx4_tlock(dev));
1638         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1639         if (!r) {
1640                 err = -ENOENT;
1641         } else if (r->com.owner != slave) {
1642                 err = -EPERM;
1643         } else if (state == RES_CQ_ALLOCATED) {
1644                 if (r->com.state != RES_CQ_HW)
1645                         err = -EINVAL;
1646                 else if (atomic_read(&r->ref_count))
1647                         err = -EBUSY;
1648                 else
1649                         err = 0;
1650         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1651                 err = -EINVAL;
1652         } else {
1653                 err = 0;
1654         }
1655
1656         if (!err) {
1657                 r->com.from_state = r->com.state;
1658                 r->com.to_state = state;
1659                 r->com.state = RES_CQ_BUSY;
1660                 if (cq)
1661                         *cq = r;
1662         }
1663
1664         spin_unlock_irq(mlx4_tlock(dev));
1665
1666         return err;
1667 }
1668
1669 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1670                                  enum res_srq_states state, struct res_srq **srq)
1671 {
1672         struct mlx4_priv *priv = mlx4_priv(dev);
1673         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1674         struct res_srq *r;
1675         int err = 0;
1676
1677         spin_lock_irq(mlx4_tlock(dev));
1678         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1679         if (!r) {
1680                 err = -ENOENT;
1681         } else if (r->com.owner != slave) {
1682                 err = -EPERM;
1683         } else if (state == RES_SRQ_ALLOCATED) {
1684                 if (r->com.state != RES_SRQ_HW)
1685                         err = -EINVAL;
1686                 else if (atomic_read(&r->ref_count))
1687                         err = -EBUSY;
1688         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1689                 err = -EINVAL;
1690         }
1691
1692         if (!err) {
1693                 r->com.from_state = r->com.state;
1694                 r->com.to_state = state;
1695                 r->com.state = RES_SRQ_BUSY;
1696                 if (srq)
1697                         *srq = r;
1698         }
1699
1700         spin_unlock_irq(mlx4_tlock(dev));
1701
1702         return err;
1703 }
1704
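/* Roll back a transition begun by a *_res_start_move_to() helper. */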
1705 static void res_abort_move(struct mlx4_dev *dev, int slave,
1706                            enum mlx4_resource type, int id)
1707 {
1708         struct mlx4_priv *priv = mlx4_priv(dev);
1709         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1710         struct res_common *r;
1711
1712         spin_lock_irq(mlx4_tlock(dev));
1713         r = res_tracker_lookup(&tracker->res_tree[type], id);
1714         if (r && (r->owner == slave))
1715                 r->state = r->from_state;
1716         spin_unlock_irq(mlx4_tlock(dev));
1717 }
1718
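/* Commit a transition begun by a *_res_start_move_to() helper. */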
1719 static void res_end_move(struct mlx4_dev *dev, int slave,
1720                          enum mlx4_resource type, int id)
1721 {
1722         struct mlx4_priv *priv = mlx4_priv(dev);
1723         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1724         struct res_common *r;
1725
1726         spin_lock_irq(mlx4_tlock(dev));
1727         r = res_tracker_lookup(&tracker->res_tree[type], id);
1728         if (r && (r->owner == slave))
1729                 r->state = r->to_state;
1730         spin_unlock_irq(mlx4_tlock(dev));
1731 }
1732
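/* A reserved QPN may be handled for this slave only if it lies in the
 * reserved QP range and either we are the master or it is one of this
 * guest's proxy QPs.
 */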
1733 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1734 {
1735         return mlx4_is_qp_reserved(dev, qpn) &&
1736                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1737 }
1738
1739 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1740 {
1741         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1742 }
1743
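/* RES_OP_RESERVE: charge the slave's QP quota and reserve a QPN range.
 * RES_OP_MAP_ICM: map ICM for one QPN and move it to RES_QP_MAPPED.
 */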
1744 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1745                         u64 in_param, u64 *out_param)
1746 {
1747         int err;
1748         int count;
1749         int align;
1750         int base;
1751         int qpn;
1752         u8 flags;
1753
1754         switch (op) {
1755         case RES_OP_RESERVE:
1756                 count = get_param_l(&in_param) & 0xffffff;
1757                 /* Turn off all unsupported QP allocation flags that the
1758                  * slave tries to set.
1759                  */
1760                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1761                 align = get_param_h(&in_param);
1762                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1763                 if (err)
1764                         return err;
1765
1766                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1767                 if (err) {
1768                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1769                         return err;
1770                 }
1771
1772                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1773                 if (err) {
1774                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1775                         __mlx4_qp_release_range(dev, base, count);
1776                         return err;
1777                 }
1778                 set_param_l(out_param, base);
1779                 break;
1780         case RES_OP_MAP_ICM:
1781                 qpn = get_param_l(&in_param) & 0x7fffff;
1782                 if (valid_reserved(dev, slave, qpn)) {
1783                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1784                         if (err)
1785                                 return err;
1786                 }
1787
1788                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1789                                            NULL, 1);
1790                 if (err)
1791                         return err;
1792
1793                 if (!fw_reserved(dev, qpn)) {
1794                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1795                         if (err) {
1796                                 res_abort_move(dev, slave, RES_QP, qpn);
1797                                 return err;
1798                         }
1799                 }
1800
1801                 res_end_move(dev, slave, RES_QP, qpn);
1802                 break;
1803
1804         default:
1805                 err = -EINVAL;
1806                 break;
1807         }
1808         return err;
1809 }
1810
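/* MTTs are only allocated with RES_OP_RESERVE_AND_MAP: charge the quota for
 * 1 << order entries, allocate the range and track it as a single segment.
 */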
1811 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1812                          u64 in_param, u64 *out_param)
1813 {
1814         int err = -EINVAL;
1815         int base;
1816         int order;
1817
1818         if (op != RES_OP_RESERVE_AND_MAP)
1819                 return err;
1820
1821         order = get_param_l(&in_param);
1822
1823         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1824         if (err)
1825                 return err;
1826
1827         base = __mlx4_alloc_mtt_range(dev, order);
1828         if (base == -1) {
1829                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1830                 return -ENOMEM;
1831         }
1832
1833         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1834         if (err) {
1835                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1836                 __mlx4_free_mtt_range(dev, base, order);
1837         } else {
1838                 set_param_l(out_param, base);
1839         }
1840
1841         return err;
1842 }
1843
1844 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1845                          u64 in_param, u64 *out_param)
1846 {
1847         int err = -EINVAL;
1848         int index;
1849         int id;
1850         struct res_mpt *mpt;
1851
1852         switch (op) {
1853         case RES_OP_RESERVE:
1854                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1855                 if (err)
1856                         break;
1857
1858                 index = __mlx4_mpt_reserve(dev);
1859                 if (index == -1) {
1860                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1861                         break;
1862                 }
1863                 id = index & mpt_mask(dev);
1864
1865                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1866                 if (err) {
1867                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1868                         __mlx4_mpt_release(dev, index);
1869                         break;
1870                 }
1871                 set_param_l(out_param, index);
1872                 break;
1873         case RES_OP_MAP_ICM:
1874                 index = get_param_l(&in_param);
1875                 id = index & mpt_mask(dev);
1876                 err = mr_res_start_move_to(dev, slave, id,
1877                                            RES_MPT_MAPPED, &mpt);
1878                 if (err)
1879                         return err;
1880
1881                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1882                 if (err) {
1883                         res_abort_move(dev, slave, RES_MPT, id);
1884                         return err;
1885                 }
1886
1887                 res_end_move(dev, slave, RES_MPT, id);
1888                 break;
1889         }
1890         return err;
1891 }
1892
1893 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1894                         u64 in_param, u64 *out_param)
1895 {
1896         int cqn;
1897         int err;
1898
1899         switch (op) {
1900         case RES_OP_RESERVE_AND_MAP:
1901                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1902                 if (err)
1903                         break;
1904
1905                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1906                 if (err) {
1907                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1908                         break;
1909                 }
1910
1911                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1912                 if (err) {
1913                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1914                         __mlx4_cq_free_icm(dev, cqn);
1915                         break;
1916                 }
1917
1918                 set_param_l(out_param, cqn);
1919                 break;
1920
1921         default:
1922                 err = -EINVAL;
1923         }
1924
1925         return err;
1926 }
1927
1928 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1929                          u64 in_param, u64 *out_param)
1930 {
1931         int srqn;
1932         int err;
1933
1934         switch (op) {
1935         case RES_OP_RESERVE_AND_MAP:
1936                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1937                 if (err)
1938                         break;
1939
1940                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1941                 if (err) {
1942                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1943                         break;
1944                 }
1945
1946                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1947                 if (err) {
1948                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1949                         __mlx4_srq_free_icm(dev, srqn);
1950                         break;
1951                 }
1952
1953                 set_param_l(out_param, srqn);
1954                 break;
1955
1956         default:
1957                 err = -EINVAL;
1958         }
1959
1960         return err;
1961 }
1962
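/* Find the MAC this slave registered on @port under the given SMAC index. */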
1963 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1964                                      u8 smac_index, u64 *mac)
1965 {
1966         struct mlx4_priv *priv = mlx4_priv(dev);
1967         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1968         struct list_head *mac_list =
1969                 &tracker->slave_list[slave].res_list[RES_MAC];
1970         struct mac_res *res, *tmp;
1971
1972         list_for_each_entry_safe(res, tmp, mac_list, list) {
1973                 if (res->smac_index == smac_index && res->port == (u8) port) {
1974                         *mac = res->mac;
1975                         return 0;
1976                 }
1977         }
1978         return -ENOENT;
1979 }
1980
1981 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1982 {
1983         struct mlx4_priv *priv = mlx4_priv(dev);
1984         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1985         struct list_head *mac_list =
1986                 &tracker->slave_list[slave].res_list[RES_MAC];
1987         struct mac_res *res, *tmp;
1988
1989         list_for_each_entry_safe(res, tmp, mac_list, list) {
1990                 if (res->mac == mac && res->port == (u8) port) {
1991                         /* mac found. update ref count */
1992                         ++res->ref_count;
1993                         return 0;
1994                 }
1995         }
1996
1997         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1998                 return -EINVAL;
1999         res = kzalloc(sizeof(*res), GFP_KERNEL);
2000         if (!res) {
2001                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2002                 return -ENOMEM;
2003         }
2004         res->mac = mac;
2005         res->port = (u8) port;
2006         res->smac_index = smac_index;
2007         res->ref_count = 1;
2008         list_add_tail(&res->list,
2009                       &tracker->slave_list[slave].res_list[RES_MAC]);
2010         return 0;
2011 }
2012
2013 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2014                                int port)
2015 {
2016         struct mlx4_priv *priv = mlx4_priv(dev);
2017         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2018         struct list_head *mac_list =
2019                 &tracker->slave_list[slave].res_list[RES_MAC];
2020         struct mac_res *res, *tmp;
2021
2022         list_for_each_entry_safe(res, tmp, mac_list, list) {
2023                 if (res->mac == mac && res->port == (u8) port) {
2024                         if (!--res->ref_count) {
2025                                 list_del(&res->list);
2026                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2027                                 kfree(res);
2028                         }
2029                         break;
2030                 }
2031         }
2032 }
2033
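/* Drop every MAC still tracked for @slave, unregistering it once per
 * reference taken.
 */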
2034 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2035 {
2036         struct mlx4_priv *priv = mlx4_priv(dev);
2037         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2038         struct list_head *mac_list =
2039                 &tracker->slave_list[slave].res_list[RES_MAC];
2040         struct mac_res *res, *tmp;
2041         int i;
2042
2043         list_for_each_entry_safe(res, tmp, mac_list, list) {
2044                 list_del(&res->list);
2045                 /* dereference the mac as many times as the slave referenced it */
2046                 for (i = 0; i < res->ref_count; i++)
2047                         __mlx4_unregister_mac(dev, res->port, res->mac);
2048                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2049                 kfree(res);
2050         }
2051 }
2052
2053 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2054                          u64 in_param, u64 *out_param, int in_port)
2055 {
2056         int err = -EINVAL;
2057         int port;
2058         u64 mac;
2059         u8 smac_index;
2060
2061         if (op != RES_OP_RESERVE_AND_MAP)
2062                 return err;
2063
2064         port = !in_port ? get_param_l(out_param) : in_port;
2065         port = mlx4_slave_convert_port(
2066                         dev, slave, port);
2067
2068         if (port < 0)
2069                 return -EINVAL;
2070         mac = in_param;
2071
2072         err = __mlx4_register_mac(dev, port, mac);
2073         if (err >= 0) {
2074                 smac_index = err;
2075                 set_param_l(out_param, err);
2076                 err = 0;
2077         }
2078
2079         if (!err) {
2080                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2081                 if (err)
2082                         __mlx4_unregister_mac(dev, port, mac);
2083         }
2084         return err;
2085 }
2086
2087 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2088                              int port, int vlan_index)
2089 {
2090         struct mlx4_priv *priv = mlx4_priv(dev);
2091         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2092         struct list_head *vlan_list =
2093                 &tracker->slave_list[slave].res_list[RES_VLAN];
2094         struct vlan_res *res, *tmp;
2095
2096         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2097                 if (res->vlan == vlan && res->port == (u8) port) {
2098                         /* vlan found. update ref count */
2099                         ++res->ref_count;
2100                         return 0;
2101                 }
2102         }
2103
2104         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2105                 return -EINVAL;
2106         res = kzalloc(sizeof(*res), GFP_KERNEL);
2107         if (!res) {
2108                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2109                 return -ENOMEM;
2110         }
2111         res->vlan = vlan;
2112         res->port = (u8) port;
2113         res->vlan_index = vlan_index;
2114         res->ref_count = 1;
2115         list_add_tail(&res->list,
2116                       &tracker->slave_list[slave].res_list[RES_VLAN]);
2117         return 0;
2118 }
2119
2120
2121 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2122                                 int port)
2123 {
2124         struct mlx4_priv *priv = mlx4_priv(dev);
2125         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2126         struct list_head *vlan_list =
2127                 &tracker->slave_list[slave].res_list[RES_VLAN];
2128         struct vlan_res *res, *tmp;
2129
2130         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2131                 if (res->vlan == vlan && res->port == (u8) port) {
2132                         if (!--res->ref_count) {
2133                                 list_del(&res->list);
2134                                 mlx4_release_resource(dev, slave, RES_VLAN,
2135                                                       1, port);
2136                                 kfree(res);
2137                         }
2138                         break;
2139                 }
2140         }
2141 }
2142
2143 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2144 {
2145         struct mlx4_priv *priv = mlx4_priv(dev);
2146         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2147         struct list_head *vlan_list =
2148                 &tracker->slave_list[slave].res_list[RES_VLAN];
2149         struct vlan_res *res, *tmp;
2150         int i;
2151
2152         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2153                 list_del(&res->list);
2154                 /* dereference the vlan as many times as the slave referenced it */
2155                 for (i = 0; i < res->ref_count; i++)
2156                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
2157                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2158                 kfree(res);
2159         }
2160 }
2161
2162 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2163                           u64 in_param, u64 *out_param, int in_port)
2164 {
2165         struct mlx4_priv *priv = mlx4_priv(dev);
2166         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2167         int err;
2168         u16 vlan;
2169         int vlan_index;
2170         int port;
2171
2172         port = !in_port ? get_param_l(out_param) : in_port;
2173
2174         if (!port || op != RES_OP_RESERVE_AND_MAP)
2175                 return -EINVAL;
2176
2177         port = mlx4_slave_convert_port(
2178                         dev, slave, port);
2179
2180         if (port < 0)
2181                 return -EINVAL;
2182         /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2183         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2184                 slave_state[slave].old_vlan_api = true;
2185                 return 0;
2186         }
2187
2188         vlan = (u16) in_param;
2189
2190         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2191         if (!err) {
2192                 set_param_l(out_param, (u32) vlan_index);
2193                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2194                 if (err)
2195                         __mlx4_unregister_vlan(dev, port, vlan);
2196         }
2197         return err;
2198 }
2199
2200 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2201                              u64 in_param, u64 *out_param, int port)
2202 {
2203         u32 index;
2204         int err;
2205
2206         if (op != RES_OP_RESERVE)
2207                 return -EINVAL;
2208
2209         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2210         if (err)
2211                 return err;
2212
2213         err = __mlx4_counter_alloc(dev, &index);
2214         if (err) {
2215                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2216                 return err;
2217         }
2218
2219         err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2220         if (err) {
2221                 __mlx4_counter_free(dev, index);
2222                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2223         } else {
2224                 set_param_l(out_param, index);
2225         }
2226
2227         return err;
2228 }
2229
2230 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2231                            u64 in_param, u64 *out_param)
2232 {
2233         u32 xrcdn;
2234         int err;
2235
2236         if (op != RES_OP_RESERVE)
2237                 return -EINVAL;
2238
2239         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2240         if (err)
2241                 return err;
2242
2243         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2244         if (err)
2245                 __mlx4_xrcd_free(dev, xrcdn);
2246         else
2247                 set_param_l(out_param, xrcdn);
2248
2249         return err;
2250 }
2251
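/* ALLOC_RES command handler: the low byte of in_modifier selects the resource
 * type and op_modifier the allocation operation; dispatch to the matching
 * per-type allocator above.
 */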
2252 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2253                            struct mlx4_vhcr *vhcr,
2254                            struct mlx4_cmd_mailbox *inbox,
2255                            struct mlx4_cmd_mailbox *outbox,
2256                            struct mlx4_cmd_info *cmd)
2257 {
2258         int err;
2259         int alop = vhcr->op_modifier;
2260
2261         switch (vhcr->in_modifier & 0xFF) {
2262         case RES_QP:
2263                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2264                                    vhcr->in_param, &vhcr->out_param);
2265                 break;
2266
2267         case RES_MTT:
2268                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2269                                     vhcr->in_param, &vhcr->out_param);
2270                 break;
2271
2272         case RES_MPT:
2273                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2274                                     vhcr->in_param, &vhcr->out_param);
2275                 break;
2276
2277         case RES_CQ:
2278                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2279                                    vhcr->in_param, &vhcr->out_param);
2280                 break;
2281
2282         case RES_SRQ:
2283                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2284                                     vhcr->in_param, &vhcr->out_param);
2285                 break;
2286
2287         case RES_MAC:
2288                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2289                                     vhcr->in_param, &vhcr->out_param,
2290                                     (vhcr->in_modifier >> 8) & 0xFF);
2291                 break;
2292
2293         case RES_VLAN:
2294                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2295                                      vhcr->in_param, &vhcr->out_param,
2296                                      (vhcr->in_modifier >> 8) & 0xFF);
2297                 break;
2298
2299         case RES_COUNTER:
2300                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2301                                         vhcr->in_param, &vhcr->out_param, 0);
2302                 break;
2303
2304         case RES_XRCD:
2305                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2306                                       vhcr->in_param, &vhcr->out_param);
2307                 break;
2308
2309         default:
2310                 err = -EINVAL;
2311                 break;
2312         }
2313
2314         return err;
2315 }
2316
2317 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2318                        u64 in_param)
2319 {
2320         int err;
2321         int count;
2322         int base;
2323         int qpn;
2324
2325         switch (op) {
2326         case RES_OP_RESERVE:
2327                 base = get_param_l(&in_param) & 0x7fffff;
2328                 count = get_param_h(&in_param);
2329                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2330                 if (err)
2331                         break;
2332                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2333                 __mlx4_qp_release_range(dev, base, count);
2334                 break;
2335         case RES_OP_MAP_ICM:
2336                 qpn = get_param_l(&in_param) & 0x7fffff;
2337                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2338                                            NULL, 0);
2339                 if (err)
2340                         return err;
2341
2342                 if (!fw_reserved(dev, qpn))
2343                         __mlx4_qp_free_icm(dev, qpn);
2344
2345                 res_end_move(dev, slave, RES_QP, qpn);
2346
2347                 if (valid_reserved(dev, slave, qpn))
2348                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2349                 break;
2350         default:
2351                 err = -EINVAL;
2352                 break;
2353         }
2354         return err;
2355 }
2356
2357 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2358                         u64 in_param, u64 *out_param)
2359 {
2360         int err = -EINVAL;
2361         int base;
2362         int order;
2363
2364         if (op != RES_OP_RESERVE_AND_MAP)
2365                 return err;
2366
2367         base = get_param_l(&in_param);
2368         order = get_param_h(&in_param);
2369         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2370         if (!err) {
2371                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2372                 __mlx4_free_mtt_range(dev, base, order);
2373         }
2374         return err;
2375 }
2376
2377 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2378                         u64 in_param)
2379 {
2380         int err = -EINVAL;
2381         int index;
2382         int id;
2383         struct res_mpt *mpt;
2384
2385         switch (op) {
2386         case RES_OP_RESERVE:
2387                 index = get_param_l(&in_param);
2388                 id = index & mpt_mask(dev);
2389                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2390                 if (err)
2391                         break;
2392                 index = mpt->key;
2393                 put_res(dev, slave, id, RES_MPT);
2394
2395                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2396                 if (err)
2397                         break;
2398                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2399                 __mlx4_mpt_release(dev, index);
2400                 break;
2401         case RES_OP_MAP_ICM:
2402                 index = get_param_l(&in_param);
2403                 id = index & mpt_mask(dev);
2404                 err = mr_res_start_move_to(dev, slave, id,
2405                                            RES_MPT_RESERVED, &mpt);
2406                 if (err)
2407                         return err;
2408
2409                 __mlx4_mpt_free_icm(dev, mpt->key);
2410                 res_end_move(dev, slave, RES_MPT, id);
2411                 break;
2412         default:
2413                 err = -EINVAL;
2414                 break;
2415         }
2416         return err;
2417 }
2418
2419 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2420                        u64 in_param, u64 *out_param)
2421 {
2422         int cqn;
2423         int err;
2424
2425         switch (op) {
2426         case RES_OP_RESERVE_AND_MAP:
2427                 cqn = get_param_l(&in_param);
2428                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2429                 if (err)
2430                         break;
2431
2432                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2433                 __mlx4_cq_free_icm(dev, cqn);
2434                 break;
2435
2436         default:
2437                 err = -EINVAL;
2438                 break;
2439         }
2440
2441         return err;
2442 }
2443
2444 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2445                         u64 in_param, u64 *out_param)
2446 {
2447         int srqn;
2448         int err;
2449
2450         switch (op) {
2451         case RES_OP_RESERVE_AND_MAP:
2452                 srqn = get_param_l(&in_param);
2453                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2454                 if (err)
2455                         break;
2456
2457                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2458                 __mlx4_srq_free_icm(dev, srqn);
2459                 break;
2460
2461         default:
2462                 err = -EINVAL;
2463                 break;
2464         }
2465
2466         return err;
2467 }
2468
2469 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2470                             u64 in_param, u64 *out_param, int in_port)
2471 {
2472         int port;
2473         int err = 0;
2474
2475         switch (op) {
2476         case RES_OP_RESERVE_AND_MAP:
2477                 port = !in_port ? get_param_l(out_param) : in_port;
2478                 port = mlx4_slave_convert_port(
2479                                 dev, slave, port);
2480
2481                 if (port < 0)
2482                         return -EINVAL;
2483                 mac_del_from_slave(dev, slave, in_param, port);
2484                 __mlx4_unregister_mac(dev, port, in_param);
2485                 break;
2486         default:
2487                 err = -EINVAL;
2488                 break;
2489         }
2490
2491         return err;
2492
2493 }
2494
2495 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2496                             u64 in_param, u64 *out_param, int port)
2497 {
2498         struct mlx4_priv *priv = mlx4_priv(dev);
2499         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2500         int err = 0;
2501
2502         port = mlx4_slave_convert_port(
2503                         dev, slave, port);
2504
2505         if (port < 0)
2506                 return -EINVAL;
2507         switch (op) {
2508         case RES_OP_RESERVE_AND_MAP:
2509                 if (slave_state[slave].old_vlan_api)
2510                         return 0;
2511                 if (!port)
2512                         return -EINVAL;
2513                 vlan_del_from_slave(dev, slave, in_param, port);
2514                 __mlx4_unregister_vlan(dev, port, in_param);
2515                 break;
2516         default:
2517                 err = -EINVAL;
2518                 break;
2519         }
2520
2521         return err;
2522 }
2523
2524 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2525                             u64 in_param, u64 *out_param)
2526 {
2527         int index;
2528         int err;
2529
2530         if (op != RES_OP_RESERVE)
2531                 return -EINVAL;
2532
2533         index = get_param_l(&in_param);
2534         if (index == MLX4_SINK_COUNTER_INDEX(dev))
2535                 return 0;
2536
2537         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2538         if (err)
2539                 return err;
2540
2541         __mlx4_counter_free(dev, index);
2542         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2543
2544         return err;
2545 }
2546
2547 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2548                           u64 in_param, u64 *out_param)
2549 {
2550         int xrcdn;
2551         int err;
2552
2553         if (op != RES_OP_RESERVE)
2554                 return -EINVAL;
2555
2556         xrcdn = get_param_l(&in_param);
2557         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2558         if (err)
2559                 return err;
2560
2561         __mlx4_xrcd_free(dev, xrcdn);
2562
2563         return err;
2564 }
2565
2566 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2567                           struct mlx4_vhcr *vhcr,
2568                           struct mlx4_cmd_mailbox *inbox,
2569                           struct mlx4_cmd_mailbox *outbox,
2570                           struct mlx4_cmd_info *cmd)
2571 {
2572         int err = -EINVAL;
2573         int alop = vhcr->op_modifier;
2574
2575         switch (vhcr->in_modifier & 0xFF) {
2576         case RES_QP:
2577                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2578                                   vhcr->in_param);
2579                 break;
2580
2581         case RES_MTT:
2582                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2583                                    vhcr->in_param, &vhcr->out_param);
2584                 break;
2585
2586         case RES_MPT:
2587                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2588                                    vhcr->in_param);
2589                 break;
2590
2591         case RES_CQ:
2592                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2593                                   vhcr->in_param, &vhcr->out_param);
2594                 break;
2595
2596         case RES_SRQ:
2597                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2598                                    vhcr->in_param, &vhcr->out_param);
2599                 break;
2600
2601         case RES_MAC:
2602                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2603                                    vhcr->in_param, &vhcr->out_param,
2604                                    (vhcr->in_modifier >> 8) & 0xFF);
2605                 break;
2606
2607         case RES_VLAN:
2608                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2609                                     vhcr->in_param, &vhcr->out_param,
2610                                     (vhcr->in_modifier >> 8) & 0xFF);
2611                 break;
2612
2613         case RES_COUNTER:
2614                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2615                                        vhcr->in_param, &vhcr->out_param);
2616                 break;
2617
2618         case RES_XRCD:
2619                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2620                                      vhcr->in_param, &vhcr->out_param);
                break;
2621
2622         default:
2623                 break;
2624         }
2625         return err;
2626 }
2627
2628 /* ugly but other choices are uglier */
2629 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2630 {
2631         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2632 }
2633
2634 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2635 {
2636         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2637 }
2638
2639 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2640 {
2641         return be32_to_cpu(mpt->mtt_sz);
2642 }
2643
2644 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2645 {
2646         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2647 }
2648
2649 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2650 {
2651         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2652 }
2653
2654 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2655 {
2656         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2657 }
2658
2659 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2660 {
2661         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2662 }
2663
2664 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2665 {
2666         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2667 }
2668
2669 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2670 {
2671         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2672 }
2673
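/* Number of MTT pages needed for a QP's work queues, derived from the SQ/RQ
 * log sizes and strides in the context and rounded up to a power of two.
 */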
2674 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2675 {
2676         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2677         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2678         int log_sq_stride = qpc->sq_size_stride & 7;
2679         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2680         int log_rq_stride = qpc->rq_size_stride & 7;
2681         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2682         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2683         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2684         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2685         int sq_size;
2686         int rq_size;
2687         int total_pages;
2688         int total_mem;
2689         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2690         int tot;
2691
2692         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2693         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2694         total_mem = sq_size + rq_size;
2695         tot = (total_mem + (page_offset << 6)) >> page_shift;
2696         total_pages = !tot ? 1 : roundup_pow_of_two(tot);
2697
2698         return total_pages;
2699 }
2700
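/* Verify that [start, start + size) falls entirely within the tracked MTT
 * segment @mtt.
 */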
2701 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2702                            int size, struct res_mtt *mtt)
2703 {
2704         int res_start = mtt->com.res_id;
2705         int res_size = (1 << mtt->order);
2706
2707         if (start < res_start || start + size > res_start + res_size)
2708                 return -EPERM;
2709         return 0;
2710 }
2711
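/* SW2HW_MPT from a slave: reject memory windows and bind-enabled FMRs, verify
 * the PD belongs to the slave and the MTT range to its allocation, then pass
 * the command to firmware and take a reference on the MTTs.
 */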
2712 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2713                            struct mlx4_vhcr *vhcr,
2714                            struct mlx4_cmd_mailbox *inbox,
2715                            struct mlx4_cmd_mailbox *outbox,
2716                            struct mlx4_cmd_info *cmd)
2717 {
2718         int err;
2719         int index = vhcr->in_modifier;
2720         struct res_mtt *mtt;
2721         struct res_mpt *mpt;
2722         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2723         int phys;
2724         int id;
2725         u32 pd;
2726         int pd_slave;
2727
2728         id = index & mpt_mask(dev);
2729         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2730         if (err)
2731                 return err;
2732
2733         /* Disable memory windows for VFs. */
2734         if (!mr_is_region(inbox->buf)) {
2735                 err = -EPERM;
2736                 goto ex_abort;
2737         }
2738
2739         /* The PD bits that encode a slave id must be zero or match this slave. */
2740         pd = mr_get_pd(inbox->buf);
2741         pd_slave = (pd >> 17) & 0x7f;
2742         if (pd_slave != 0 && --pd_slave != slave) {
2743                 err = -EPERM;
2744                 goto ex_abort;
2745         }
2746
2747         if (mr_is_fmr(inbox->buf)) {
2748                 /* FMR and Bind Enable are forbidden in slave devices. */
2749                 if (mr_is_bind_enabled(inbox->buf)) {
2750                         err = -EPERM;
2751                         goto ex_abort;
2752                 }
2753                 /* FMR and Memory Windows are also forbidden. */
2754                 if (!mr_is_region(inbox->buf)) {
2755                         err = -EPERM;
2756                         goto ex_abort;
2757                 }
2758         }
2759
2760         phys = mr_phys_mpt(inbox->buf);
2761         if (!phys) {
2762                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2763                 if (err)
2764                         goto ex_abort;
2765
2766                 err = check_mtt_range(dev, slave, mtt_base,
2767                                       mr_get_mtt_size(inbox->buf), mtt);
2768                 if (err)
2769                         goto ex_put;
2770
2771                 mpt->mtt = mtt;
2772         }
2773
2774         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2775         if (err)
2776                 goto ex_put;
2777
2778         if (!phys) {
2779                 atomic_inc(&mtt->ref_count);
2780                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2781         }
2782
2783         res_end_move(dev, slave, RES_MPT, id);
2784         return 0;
2785
2786 ex_put:
2787         if (!phys)
2788                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2789 ex_abort:
2790         res_abort_move(dev, slave, RES_MPT, id);
2791
2792         return err;
2793 }
2794
2795 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2796                            struct mlx4_vhcr *vhcr,
2797                            struct mlx4_cmd_mailbox *inbox,
2798                            struct mlx4_cmd_mailbox *outbox,
2799                            struct mlx4_cmd_info *cmd)
2800 {
2801         int err;
2802         int index = vhcr->in_modifier;
2803         struct res_mpt *mpt;
2804         int id;
2805
2806         id = index & mpt_mask(dev);
2807         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2808         if (err)
2809                 return err;
2810
2811         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2812         if (err)
2813                 goto ex_abort;
2814
2815         if (mpt->mtt)
2816                 atomic_dec(&mpt->mtt->ref_count);
2817
2818         res_end_move(dev, slave, RES_MPT, id);
2819         return 0;
2820
2821 ex_abort:
2822         res_abort_move(dev, slave, RES_MPT, id);
2823
2824         return err;
2825 }
2826
2827 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2828                            struct mlx4_vhcr *vhcr,
2829                            struct mlx4_cmd_mailbox *inbox,
2830                            struct mlx4_cmd_mailbox *outbox,
2831                            struct mlx4_cmd_info *cmd)
2832 {
2833         int err;
2834         int index = vhcr->in_modifier;
2835         struct res_mpt *mpt;
2836         int id;
2837
2838         id = index & mpt_mask(dev);
2839         err = get_res(dev, slave, id, RES_MPT, &mpt);
2840         if (err)
2841                 return err;
2842
2843         if (mpt->com.from_state == RES_MPT_MAPPED) {
2844                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2845                  * that, the VF must read the MPT. But since the MPT entry memory is not
2846                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2847                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2848                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2849                  * ownership following the change. The change here allows the VF to
2850                  * perform QUERY_MPT also when the entry is in SW ownership.
2851                  */
2852                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2853                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2854                                         mpt->key, NULL);
2855
2856                 if (NULL == mpt_entry || NULL == outbox->buf) {
2857                         err = -EINVAL;
2858                         goto out;
2859                 }
2860
2861                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2862
2863                 err = 0;
2864         } else if (mpt->com.from_state == RES_MPT_HW) {
2865                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2866         } else {
2867                 err = -EBUSY;
2868                 goto out;
2869         }
2870
2871
2872 out:
2873         put_res(dev, slave, id, RES_MPT);
2874         return err;
2875 }
2876
2877 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2878 {
2879         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2880 }
2881
2882 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2883 {
2884         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2885 }
2886
2887 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2888 {
2889         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2890 }
2891
2892 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2893                                   struct mlx4_qp_context *context)
2894 {
2895         u32 qpn = vhcr->in_modifier & 0xffffff;
2896         u32 qkey = 0;
2897
2898         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2899                 return;
2900
2901         /* adjust qkey in qp context */
2902         context->qkey = cpu_to_be32(qkey);
2903 }
2904
2905 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2906                                  struct mlx4_qp_context *qpc,
2907                                  struct mlx4_cmd_mailbox *inbox);
2908
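/* RST2INIT from a slave: move the QP to hardware ownership and take
 * references on its MTTs, receive/send CQs and SRQ so they cannot be
 * destroyed while the QP uses them.
 */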
2909 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2910                              struct mlx4_vhcr *vhcr,
2911                              struct mlx4_cmd_mailbox *inbox,
2912                              struct mlx4_cmd_mailbox *outbox,
2913                              struct mlx4_cmd_info *cmd)
2914 {
2915         int err;
2916         int qpn = vhcr->in_modifier & 0x7fffff;
2917         struct res_mtt *mtt;
2918         struct res_qp *qp;
2919         struct mlx4_qp_context *qpc = inbox->buf + 8;
2920         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2921         int mtt_size = qp_get_mtt_size(qpc);
2922         struct res_cq *rcq;
2923         struct res_cq *scq;
2924         int rcqn = qp_get_rcqn(qpc);
2925         int scqn = qp_get_scqn(qpc);
2926         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2927         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2928         struct res_srq *srq;
2929         int local_qpn = vhcr->in_modifier & 0xffffff;
2930
2931         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2932         if (err)
2933                 return err;
2934
2935         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2936         if (err)
2937                 return err;
2938         qp->local_qpn = local_qpn;
2939         qp->sched_queue = 0;
2940         qp->param3 = 0;
2941         qp->vlan_control = 0;
2942         qp->fvl_rx = 0;
2943         qp->pri_path_fl = 0;
2944         qp->vlan_index = 0;
2945         qp->feup = 0;
2946         qp->qpc_flags = be32_to_cpu(qpc->flags);
2947
2948         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2949         if (err)
2950                 goto ex_abort;
2951
2952         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2953         if (err)
2954                 goto ex_put_mtt;
2955
2956         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2957         if (err)
2958                 goto ex_put_mtt;
2959
2960         if (scqn != rcqn) {
2961                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2962                 if (err)
2963                         goto ex_put_rcq;
2964         } else
2965                 scq = rcq;
2966
2967         if (use_srq) {
2968                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2969                 if (err)
2970                         goto ex_put_scq;
2971         }
2972
2973         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2974         update_pkey_index(dev, slave, inbox);
2975         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2976         if (err)
2977                 goto ex_put_srq;
2978         atomic_inc(&mtt->ref_count);
2979         qp->mtt = mtt;
2980         atomic_inc(&rcq->ref_count);
2981         qp->rcq = rcq;
2982         atomic_inc(&scq->ref_count);
2983         qp->scq = scq;
2984
2985         if (scqn != rcqn)
2986                 put_res(dev, slave, scqn, RES_CQ);
2987
2988         if (use_srq) {
2989                 atomic_inc(&srq->ref_count);
2990                 put_res(dev, slave, srqn, RES_SRQ);
2991                 qp->srq = srq;
2992         }
2993
2994         /* Save param3 for dynamic changes from VST back to VGT */
2995         qp->param3 = qpc->param3;
2996         put_res(dev, slave, rcqn, RES_CQ);
2997         put_res(dev, slave, mtt_base, RES_MTT);
2998         res_end_move(dev, slave, RES_QP, qpn);
2999
3000         return 0;
3001
3002 ex_put_srq:
3003         if (use_srq)
3004                 put_res(dev, slave, srqn, RES_SRQ);
3005 ex_put_scq:
3006         if (scqn != rcqn)
3007                 put_res(dev, slave, scqn, RES_CQ);
3008 ex_put_rcq:
3009         put_res(dev, slave, rcqn, RES_CQ);
3010 ex_put_mtt:
3011         put_res(dev, slave, mtt_base, RES_MTT);
3012 ex_abort:
3013         res_abort_move(dev, slave, RES_QP, qpn);
3014
3015         return err;
3016 }
3017
3018 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3019 {
3020         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3021 }
3022
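/* Number of MTT pages spanned by the EQ buffer: 2^log_eq_size entries,
 * each accounted as 32 bytes (hence the "+ 5"), divided by the page size;
 * always at least one page.
 */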
3023 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3024 {
3025         int log_eq_size = eqc->log_eq_size & 0x1f;
3026         int page_shift = (eqc->log_page_size & 0x3f) + 12;
3027
3028         if (log_eq_size + 5 < page_shift)
3029                 return 1;
3030
3031         return 1 << (log_eq_size + 5 - page_shift);
3032 }
3033
3034 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3035 {
3036         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3037 }
3038
3039 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3040 {
3041         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3042         int page_shift = (cqc->log_page_size & 0x3f) + 12;
3043
3044         if (log_cq_size + 5 < page_shift)
3045                 return 1;
3046
3047         return 1 << (log_cq_size + 5 - page_shift);
3048 }
3049
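/* Move a slave's EQ to HW ownership: register the EQ in the resource
 * tracker under a slave-scoped id (slave number in the high bits),
 * validate and pin the MTT range backing it, then forward the command
 * to the FW.
 */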
3050 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3051                           struct mlx4_vhcr *vhcr,
3052                           struct mlx4_cmd_mailbox *inbox,
3053                           struct mlx4_cmd_mailbox *outbox,
3054                           struct mlx4_cmd_info *cmd)
3055 {
3056         int err;
3057         int eqn = vhcr->in_modifier;
3058         int res_id = (slave << 10) | eqn;
3059         struct mlx4_eq_context *eqc = inbox->buf;
3060         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3061         int mtt_size = eq_get_mtt_size(eqc);
3062         struct res_eq *eq;
3063         struct res_mtt *mtt;
3064
3065         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3066         if (err)
3067                 return err;
3068         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3069         if (err)
3070                 goto out_add;
3071
3072         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3073         if (err)
3074                 goto out_move;
3075
3076         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3077         if (err)
3078                 goto out_put;
3079
3080         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3081         if (err)
3082                 goto out_put;
3083
3084         atomic_inc(&mtt->ref_count);
3085         eq->mtt = mtt;
3086         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3087         res_end_move(dev, slave, RES_EQ, res_id);
3088         return 0;
3089
3090 out_put:
3091         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3092 out_move:
3093         res_abort_move(dev, slave, RES_EQ, res_id);
3094 out_add:
3095         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3096         return err;
3097 }
3098
3099 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3100                             struct mlx4_vhcr *vhcr,
3101                             struct mlx4_cmd_mailbox *inbox,
3102                             struct mlx4_cmd_mailbox *outbox,
3103                             struct mlx4_cmd_info *cmd)
3104 {
3105         int err;
3106         u8 get = vhcr->op_modifier;
3107
3108         if (get != 1)
3109                 return -EPERM;
3110
3111         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3112
3113         return err;
3114 }
3115
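/* Find an MTT resource owned by @slave whose range contains
 * [start, start + len) and mark it busy; on success the caller must
 * release it with put_res().
 */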
3116 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3117                               int len, struct res_mtt **res)
3118 {
3119         struct mlx4_priv *priv = mlx4_priv(dev);
3120         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3121         struct res_mtt *mtt;
3122         int err = -EINVAL;
3123
3124         spin_lock_irq(mlx4_tlock(dev));
3125         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3126                             com.list) {
3127                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3128                         *res = mtt;
3129                         mtt->com.from_state = mtt->com.state;
3130                         mtt->com.state = RES_MTT_BUSY;
3131                         err = 0;
3132                         break;
3133                 }
3134         }
3135         spin_unlock_irq(mlx4_tlock(dev));
3136
3137         return err;
3138 }
3139
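/* Sanity-check slave-supplied QP context fields for a state transition:
 * the FPP bit is cleared for VFs, setting a QP rate limit is rejected,
 * primary/alternate GID indexes must fall within the slave's GID range,
 * and only VFs with SMI enabled may bring a reserved (proxy) MLX QP to RTR.
 */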
3140 static int verify_qp_parameters(struct mlx4_dev *dev,
3141                                 struct mlx4_vhcr *vhcr,
3142                                 struct mlx4_cmd_mailbox *inbox,
3143                                 enum qp_transition transition, u8 slave)
3144 {
3145         u32                     qp_type;
3146         u32                     qpn;
3147         struct mlx4_qp_context  *qp_ctx;
3148         enum mlx4_qp_optpar     optpar;
3149         int port;
3150         int num_gids;
3151
3152         qp_ctx  = inbox->buf + 8;
3153         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3154         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
3155
3156         if (slave != mlx4_master_func_num(dev)) {
3157                 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3158                 /* setting QP rate-limit is disallowed for VFs */
3159                 if (qp_ctx->rate_limit_params)
3160                         return -EPERM;
3161         }
3162
3163         switch (qp_type) {
3164         case MLX4_QP_ST_RC:
3165         case MLX4_QP_ST_XRC:
3166         case MLX4_QP_ST_UC:
3167                 switch (transition) {
3168                 case QP_TRANS_INIT2RTR:
3169                 case QP_TRANS_RTR2RTS:
3170                 case QP_TRANS_RTS2RTS:
3171                 case QP_TRANS_SQD2SQD:
3172                 case QP_TRANS_SQD2RTS:
3173                         if (slave != mlx4_master_func_num(dev)) {
3174                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3175                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3176                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3177                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3178                                         else
3179                                                 num_gids = 1;
3180                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
3181                                                 return -EINVAL;
3182                                 }
3183                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3184                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3185                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3186                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3187                                         else
3188                                                 num_gids = 1;
3189                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
3190                                                 return -EINVAL;
3191                                 }
3192                         }
3193                         break;
3194                 default:
3195                         break;
3196                 }
3197                 break;
3198
3199         case MLX4_QP_ST_MLX:
3200                 qpn = vhcr->in_modifier & 0x7fffff;
3201                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3202                 if (transition == QP_TRANS_INIT2RTR &&
3203                     slave != mlx4_master_func_num(dev) &&
3204                     mlx4_is_qp_reserved(dev, qpn) &&
3205                     !mlx4_vf_smi_enabled(dev, slave, port)) {
3206                         /* only enabled VFs may create MLX proxy QPs */
3207                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3208                                  __func__, slave, port);
3209                         return -EPERM;
3210                 }
3211                 break;
3212
3213         default:
3214                 break;
3215         }
3216
3217         return 0;
3218 }
3219
3220 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3221                            struct mlx4_vhcr *vhcr,
3222                            struct mlx4_cmd_mailbox *inbox,
3223                            struct mlx4_cmd_mailbox *outbox,
3224                            struct mlx4_cmd_info *cmd)
3225 {
3226         struct mlx4_mtt mtt;
3227         __be64 *page_list = inbox->buf;
3228         u64 *pg_list = (u64 *)page_list;
3229         int i;
3230         struct res_mtt *rmtt = NULL;
3231         int start = be64_to_cpu(page_list[0]);
3232         int npages = vhcr->in_modifier;
3233         int err;
3234
3235         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3236         if (err)
3237                 return err;
3238
3239         /* Call the SW implementation of write_mtt:
3240          * - Prepare a dummy mtt struct
3241          * - Translate inbox contents to simple addresses in host endianness */
3242         mtt.offset = 0;  /* TBD: this offset is not correct, but it is
3243                             never used here, so we don't handle it */
3244         mtt.order = 0;
3245         mtt.page_shift = 0;
3246         for (i = 0; i < npages; ++i)
3247                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3248
3249         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3250                                ((u64 *)page_list + 2));
3251
3252         if (rmtt)
3253                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3254
3255         return err;
3256 }
3257
3258 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3259                           struct mlx4_vhcr *vhcr,
3260                           struct mlx4_cmd_mailbox *inbox,
3261                           struct mlx4_cmd_mailbox *outbox,
3262                           struct mlx4_cmd_info *cmd)
3263 {
3264         int eqn = vhcr->in_modifier;
3265         int res_id = eqn | (slave << 10);
3266         struct res_eq *eq;
3267         int err;
3268
3269         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3270         if (err)
3271                 return err;
3272
3273         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3274         if (err)
3275                 goto ex_abort;
3276
3277         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3278         if (err)
3279                 goto ex_put;
3280
3281         atomic_dec(&eq->mtt->ref_count);
3282         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3283         res_end_move(dev, slave, RES_EQ, res_id);
3284         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3285
3286         return 0;
3287
3288 ex_put:
3289         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3290 ex_abort:
3291         res_abort_move(dev, slave, RES_EQ, res_id);
3292
3293         return err;
3294 }
3295
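/* Deliver an asynchronous event to a slave: if the slave is active and has
 * registered an event EQ for this event type, copy the EQE into a mailbox
 * and post a GEN_EQE command targeting that EQ (slave and EQ number are
 * encoded in the in_modifier).
 */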
3296 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3297 {
3298         struct mlx4_priv *priv = mlx4_priv(dev);
3299         struct mlx4_slave_event_eq_info *event_eq;
3300         struct mlx4_cmd_mailbox *mailbox;
3301         u32 in_modifier = 0;
3302         int err;
3303         int res_id;
3304         struct res_eq *req;
3305
3306         if (!priv->mfunc.master.slave_state)
3307                 return -EINVAL;
3308
3309         /* check that the slave is valid, is not the PF, and is active */
3310         if (slave < 0 || slave > dev->persist->num_vfs ||
3311             slave == dev->caps.function ||
3312             !priv->mfunc.master.slave_state[slave].active)
3313                 return 0;
3314
3315         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3316
3317         /* Create the event only if the slave is registered */
3318         if (event_eq->eqn < 0)
3319                 return 0;
3320
3321         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3322         res_id = (slave << 10) | event_eq->eqn;
3323         err = get_res(dev, slave, res_id, RES_EQ, &req);
3324         if (err)
3325                 goto unlock;
3326
3327         if (req->com.from_state != RES_EQ_HW) {
3328                 err = -EINVAL;
3329                 goto put;
3330         }
3331
3332         mailbox = mlx4_alloc_cmd_mailbox(dev);
3333         if (IS_ERR(mailbox)) {
3334                 err = PTR_ERR(mailbox);
3335                 goto put;
3336         }
3337
3338         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3339                 ++event_eq->token;
3340                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3341         }
3342
3343         memcpy(mailbox->buf, (u8 *) eqe, 28);
3344
3345         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3346
3347         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3348                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3349                        MLX4_CMD_NATIVE);
3350
3351         put_res(dev, slave, res_id, RES_EQ);
3352         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3353         mlx4_free_cmd_mailbox(dev, mailbox);
3354         return err;
3355
3356 put:
3357         put_res(dev, slave, res_id, RES_EQ);
3358
3359 unlock:
3360         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3361         return err;
3362 }
3363
3364 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3365                           struct mlx4_vhcr *vhcr,
3366                           struct mlx4_cmd_mailbox *inbox,
3367                           struct mlx4_cmd_mailbox *outbox,
3368                           struct mlx4_cmd_info *cmd)
3369 {
3370         int eqn = vhcr->in_modifier;
3371         int res_id = eqn | (slave << 10);
3372         struct res_eq *eq;
3373         int err;
3374
3375         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3376         if (err)
3377                 return err;
3378
3379         if (eq->com.from_state != RES_EQ_HW) {
3380                 err = -EINVAL;
3381                 goto ex_put;
3382         }
3383
3384         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3385
3386 ex_put:
3387         put_res(dev, slave, res_id, RES_EQ);
3388         return err;
3389 }
3390
3391 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3392                           struct mlx4_vhcr *vhcr,
3393                           struct mlx4_cmd_mailbox *inbox,
3394                           struct mlx4_cmd_mailbox *outbox,
3395                           struct mlx4_cmd_info *cmd)
3396 {
3397         int err;
3398         int cqn = vhcr->in_modifier;
3399         struct mlx4_cq_context *cqc = inbox->buf;
3400         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3401         struct res_cq *cq = NULL;
3402         struct res_mtt *mtt;
3403
3404         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3405         if (err)
3406                 return err;
3407         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3408         if (err)
3409                 goto out_move;
3410         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3411         if (err)
3412                 goto out_put;
3413         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3414         if (err)
3415                 goto out_put;
3416         atomic_inc(&mtt->ref_count);
3417         cq->mtt = mtt;
3418         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3419         res_end_move(dev, slave, RES_CQ, cqn);
3420         return 0;
3421
3422 out_put:
3423         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3424 out_move:
3425         res_abort_move(dev, slave, RES_CQ, cqn);
3426         return err;
3427 }
3428
3429 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3430                           struct mlx4_vhcr *vhcr,
3431                           struct mlx4_cmd_mailbox *inbox,
3432                           struct mlx4_cmd_mailbox *outbox,
3433                           struct mlx4_cmd_info *cmd)
3434 {
3435         int err;
3436         int cqn = vhcr->in_modifier;
3437         struct res_cq *cq = NULL;
3438
3439         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3440         if (err)
3441                 return err;
3442         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3443         if (err)
3444                 goto out_move;
3445         atomic_dec(&cq->mtt->ref_count);
3446         res_end_move(dev, slave, RES_CQ, cqn);
3447         return 0;
3448
3449 out_move:
3450         res_abort_move(dev, slave, RES_CQ, cqn);
3451         return err;
3452 }
3453
3454 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3455                           struct mlx4_vhcr *vhcr,
3456                           struct mlx4_cmd_mailbox *inbox,
3457                           struct mlx4_cmd_mailbox *outbox,
3458                           struct mlx4_cmd_info *cmd)
3459 {
3460         int cqn = vhcr->in_modifier;
3461         struct res_cq *cq;
3462         int err;
3463
3464         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3465         if (err)
3466                 return err;
3467
3468         if (cq->com.from_state != RES_CQ_HW)
3469                 goto ex_put;
3470
3471         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3472 ex_put:
3473         put_res(dev, slave, cqn, RES_CQ);
3474
3475         return err;
3476 }
3477
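/* CQ resize (MODIFY_CQ with op_modifier 0): verify the CQ still owns its
 * original MTT, validate the new MTT range, forward the command to the FW,
 * then move the MTT reference from the old range to the new one.
 */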
3478 static int handle_resize(struct mlx4_dev *dev, int slave,
3479                          struct mlx4_vhcr *vhcr,
3480                          struct mlx4_cmd_mailbox *inbox,
3481                          struct mlx4_cmd_mailbox *outbox,
3482                          struct mlx4_cmd_info *cmd,
3483                          struct res_cq *cq)
3484 {
3485         int err;
3486         struct res_mtt *orig_mtt;
3487         struct res_mtt *mtt;
3488         struct mlx4_cq_context *cqc = inbox->buf;
3489         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3490
3491         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3492         if (err)
3493                 return err;
3494
3495         if (orig_mtt != cq->mtt) {
3496                 err = -EINVAL;
3497                 goto ex_put;
3498         }
3499
3500         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3501         if (err)
3502                 goto ex_put;
3503
3504         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3505         if (err)
3506                 goto ex_put1;
3507         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3508         if (err)
3509                 goto ex_put1;
3510         atomic_dec(&orig_mtt->ref_count);
3511         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3512         atomic_inc(&mtt->ref_count);
3513         cq->mtt = mtt;
3514         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3515         return 0;
3516
3517 ex_put1:
3518         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3519 ex_put:
3520         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3521
3522         return err;
3523
3524 }
3525
3526 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3527                            struct mlx4_vhcr *vhcr,
3528                            struct mlx4_cmd_mailbox *inbox,
3529                            struct mlx4_cmd_mailbox *outbox,
3530                            struct mlx4_cmd_info *cmd)
3531 {
3532         int cqn = vhcr->in_modifier;
3533         struct res_cq *cq;
3534         int err;
3535
3536         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3537         if (err)
3538                 return err;
3539
3540         if (cq->com.from_state != RES_CQ_HW)
3541                 goto ex_put;
3542
3543         if (vhcr->op_modifier == 0) {
3544                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3545                 goto ex_put;
3546         }
3547
3548         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3549 ex_put:
3550         put_res(dev, slave, cqn, RES_CQ);
3551
3552         return err;
3553 }
3554
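/* Number of MTT pages spanned by the SRQ buffer: 2^log_srq_size WQEs with
 * a stride of 2^(log_rq_stride + 4) bytes, divided by the page size;
 * always at least one page.
 */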
3555 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3556 {
3557         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3558         int log_rq_stride = srqc->logstride & 7;
3559         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3560
3561         if (log_srq_size + log_rq_stride + 4 < page_shift)
3562                 return 1;
3563
3564         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3565 }
3566
3567 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3568                            struct mlx4_vhcr *vhcr,
3569                            struct mlx4_cmd_mailbox *inbox,
3570                            struct mlx4_cmd_mailbox *outbox,
3571                            struct mlx4_cmd_info *cmd)
3572 {
3573         int err;
3574         int srqn = vhcr->in_modifier;
3575         struct res_mtt *mtt;
3576         struct res_srq *srq = NULL;
3577         struct mlx4_srq_context *srqc = inbox->buf;
3578         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3579
3580         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3581                 return -EINVAL;
3582
3583         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3584         if (err)
3585                 return err;
3586         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3587         if (err)
3588                 goto ex_abort;
3589         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3590                               mtt);
3591         if (err)
3592                 goto ex_put_mtt;
3593
3594         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3595         if (err)
3596                 goto ex_put_mtt;
3597
3598         atomic_inc(&mtt->ref_count);
3599         srq->mtt = mtt;
3600         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3601         res_end_move(dev, slave, RES_SRQ, srqn);
3602         return 0;
3603
3604 ex_put_mtt:
3605         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3606 ex_abort:
3607         res_abort_move(dev, slave, RES_SRQ, srqn);
3608
3609         return err;
3610 }
3611
3612 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3613                            struct mlx4_vhcr *vhcr,
3614                            struct mlx4_cmd_mailbox *inbox,
3615                            struct mlx4_cmd_mailbox *outbox,
3616                            struct mlx4_cmd_info *cmd)
3617 {
3618         int err;
3619         int srqn = vhcr->in_modifier;
3620         struct res_srq *srq = NULL;
3621
3622         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3623         if (err)
3624                 return err;
3625         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3626         if (err)
3627                 goto ex_abort;
3628         atomic_dec(&srq->mtt->ref_count);
3629         if (srq->cq)
3630                 atomic_dec(&srq->cq->ref_count);
3631         res_end_move(dev, slave, RES_SRQ, srqn);
3632
3633         return 0;
3634
3635 ex_abort:
3636         res_abort_move(dev, slave, RES_SRQ, srqn);
3637
3638         return err;
3639 }
3640
3641 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3642                            struct mlx4_vhcr *vhcr,
3643                            struct mlx4_cmd_mailbox *inbox,
3644                            struct mlx4_cmd_mailbox *outbox,
3645                            struct mlx4_cmd_info *cmd)
3646 {
3647         int err;
3648         int srqn = vhcr->in_modifier;
3649         struct res_srq *srq;
3650
3651         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3652         if (err)
3653                 return err;
3654         if (srq->com.from_state != RES_SRQ_HW) {
3655                 err = -EBUSY;
3656                 goto out;
3657         }
3658         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3659 out:
3660         put_res(dev, slave, srqn, RES_SRQ);
3661         return err;
3662 }
3663
3664 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3665                          struct mlx4_vhcr *vhcr,
3666                          struct mlx4_cmd_mailbox *inbox,
3667                          struct mlx4_cmd_mailbox *outbox,
3668                          struct mlx4_cmd_info *cmd)
3669 {
3670         int err;
3671         int srqn = vhcr->in_modifier;
3672         struct res_srq *srq;
3673
3674         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3675         if (err)
3676                 return err;
3677
3678         if (srq->com.from_state != RES_SRQ_HW) {
3679                 err = -EBUSY;
3680                 goto out;
3681         }
3682
3683         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3684 out:
3685         put_res(dev, slave, srqn, RES_SRQ);
3686         return err;
3687 }
3688
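/* Generic wrapper for QP commands that only require the QP to be in HW
 * ownership: mark the QP busy, forward the command to the FW, release it.
 */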
3689 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3690                         struct mlx4_vhcr *vhcr,
3691                         struct mlx4_cmd_mailbox *inbox,
3692                         struct mlx4_cmd_mailbox *outbox,
3693                         struct mlx4_cmd_info *cmd)
3694 {
3695         int err;
3696         int qpn = vhcr->in_modifier & 0x7fffff;
3697         struct res_qp *qp;
3698
3699         err = get_res(dev, slave, qpn, RES_QP, &qp);
3700         if (err)
3701                 return err;
3702         if (qp->com.from_state != RES_QP_HW) {
3703                 err = -EBUSY;
3704                 goto out;
3705         }
3706
3707         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3708 out:
3709         put_res(dev, slave, qpn, RES_QP);
3710         return err;
3711 }
3712
3713 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3714                               struct mlx4_vhcr *vhcr,
3715                               struct mlx4_cmd_mailbox *inbox,
3716                               struct mlx4_cmd_mailbox *outbox,
3717                               struct mlx4_cmd_info *cmd)
3718 {
3719         struct mlx4_qp_context *context = inbox->buf + 8;
3720         adjust_proxy_tun_qkey(dev, vhcr, context);
3721         update_pkey_index(dev, slave, inbox);
3722         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3723 }
3724
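/* Rewrite the port bit (bit 6) of the sched_queue fields so that the
 * slave's virtual port is translated to the real physical port.
 */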
3725 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3726                                   struct mlx4_qp_context *qpc,
3727                                   struct mlx4_cmd_mailbox *inbox)
3728 {
3729         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3730         u8 pri_sched_queue;
3731         int port = mlx4_slave_convert_port(
3732                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3733
3734         if (port < 0)
3735                 return -EINVAL;
3736
3737         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3738                           ((port & 1) << 6);
3739
3740         if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3741             qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3742                 qpc->pri_path.sched_queue = pri_sched_queue;
3743         }
3744
3745         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3746                 port = mlx4_slave_convert_port(
3747                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3748                                 + 1) - 1;
3749                 if (port < 0)
3750                         return -EINVAL;
3751                 qpc->alt_path.sched_queue =
3752                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3753                         (port & 1) << 6;
3754         }
3755         return 0;
3756 }
3757
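/* For QPs on Ethernet ports (other than the MLX service type), verify that
 * the source-MAC index in the primary path refers to a MAC registered to
 * this slave on that port.
 */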
3758 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3759                                 struct mlx4_qp_context *qpc,
3760                                 struct mlx4_cmd_mailbox *inbox)
3761 {
3762         u64 mac;
3763         int port;
3764         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3765         u8 sched = *(u8 *)(inbox->buf + 64);
3766         u8 smac_ix;
3767
3768         port = (sched >> 6 & 1) + 1;
3769         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3770                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3771                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3772                         return -ENOENT;
3773         }
3774         return 0;
3775 }
3776
3777 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3778                              struct mlx4_vhcr *vhcr,
3779                              struct mlx4_cmd_mailbox *inbox,
3780                              struct mlx4_cmd_mailbox *outbox,
3781                              struct mlx4_cmd_info *cmd)
3782 {
3783         int err;
3784         struct mlx4_qp_context *qpc = inbox->buf + 8;
3785         int qpn = vhcr->in_modifier & 0x7fffff;
3786         struct res_qp *qp;
3787         u8 orig_sched_queue;
3788         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3789         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3790         u8 orig_pri_path_fl = qpc->pri_path.fl;
3791         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3792         u8 orig_feup = qpc->pri_path.feup;
3793
3794         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3795         if (err)
3796                 return err;
3797         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3798         if (err)
3799                 return err;
3800
3801         if (roce_verify_mac(dev, slave, qpc, inbox))
3802                 return -EINVAL;
3803
3804         update_pkey_index(dev, slave, inbox);
3805         update_gid(dev, inbox, (u8)slave);
3806         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3807         orig_sched_queue = qpc->pri_path.sched_queue;
3808
3809         err = get_res(dev, slave, qpn, RES_QP, &qp);
3810         if (err)
3811                 return err;
3812         if (qp->com.from_state != RES_QP_HW) {
3813                 err = -EBUSY;
3814                 goto out;
3815         }
3816
3817         err = update_vport_qp_param(dev, inbox, slave, qpn);
3818         if (err)
3819                 goto out;
3820
3821         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3822 out:
3823         /* if no error, save sched queue value passed in by VF. This is
3824          * essentially the QOS value provided by the VF. This will be useful
3825          * if we allow dynamic changes from VST back to VGT
3826          */
3827         if (!err) {
3828                 qp->sched_queue = orig_sched_queue;
3829                 qp->vlan_control = orig_vlan_control;
3830                 qp->fvl_rx      =  orig_fvl_rx;
3831                 qp->pri_path_fl = orig_pri_path_fl;
3832                 qp->vlan_index  = orig_vlan_index;
3833                 qp->feup        = orig_feup;
3834         }
3835         put_res(dev, slave, qpn, RES_QP);
3836         return err;
3837 }
3838
3839 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3840                             struct mlx4_vhcr *vhcr,
3841                             struct mlx4_cmd_mailbox *inbox,
3842                             struct mlx4_cmd_mailbox *outbox,
3843                             struct mlx4_cmd_info *cmd)
3844 {
3845         int err;
3846         struct mlx4_qp_context *context = inbox->buf + 8;
3847
3848         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3849         if (err)
3850                 return err;
3851         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3852         if (err)
3853                 return err;
3854
3855         update_pkey_index(dev, slave, inbox);
3856         update_gid(dev, inbox, (u8)slave);
3857         adjust_proxy_tun_qkey(dev, vhcr, context);
3858         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3859 }
3860
3861 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3862                             struct mlx4_vhcr *vhcr,
3863                             struct mlx4_cmd_mailbox *inbox,
3864                             struct mlx4_cmd_mailbox *outbox,
3865                             struct mlx4_cmd_info *cmd)
3866 {
3867         int err;
3868         struct mlx4_qp_context *context = inbox->buf + 8;
3869
3870         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3871         if (err)
3872                 return err;
3873         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3874         if (err)
3875                 return err;
3876
3877         update_pkey_index(dev, slave, inbox);
3878         update_gid(dev, inbox, (u8)slave);
3879         adjust_proxy_tun_qkey(dev, vhcr, context);
3880         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3881 }
3882
3883
3884 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3885                               struct mlx4_vhcr *vhcr,
3886                               struct mlx4_cmd_mailbox *inbox,
3887                               struct mlx4_cmd_mailbox *outbox,
3888                               struct mlx4_cmd_info *cmd)
3889 {
3890         struct mlx4_qp_context *context = inbox->buf + 8;
3891         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3892         if (err)
3893                 return err;
3894         adjust_proxy_tun_qkey(dev, vhcr, context);
3895         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3896 }
3897
3898 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3899                             struct mlx4_vhcr *vhcr,
3900                             struct mlx4_cmd_mailbox *inbox,
3901                             struct mlx4_cmd_mailbox *outbox,
3902                             struct mlx4_cmd_info *cmd)
3903 {
3904         int err;
3905         struct mlx4_qp_context *context = inbox->buf + 8;
3906
3907         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3908         if (err)
3909                 return err;
3910         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3911         if (err)
3912                 return err;
3913
3914         adjust_proxy_tun_qkey(dev, vhcr, context);
3915         update_gid(dev, inbox, (u8)slave);
3916         update_pkey_index(dev, slave, inbox);
3917         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3918 }
3919
3920 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3921                             struct mlx4_vhcr *vhcr,
3922                             struct mlx4_cmd_mailbox *inbox,
3923                             struct mlx4_cmd_mailbox *outbox,
3924                             struct mlx4_cmd_info *cmd)
3925 {
3926         int err;
3927         struct mlx4_qp_context *context = inbox->buf + 8;
3928
3929         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3930         if (err)
3931                 return err;
3932         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3933         if (err)
3934                 return err;
3935
3936         adjust_proxy_tun_qkey(dev, vhcr, context);
3937         update_gid(dev, inbox, (u8)slave);
3938         update_pkey_index(dev, slave, inbox);
3939         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3940 }
3941
3942 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3943                          struct mlx4_vhcr *vhcr,
3944                          struct mlx4_cmd_mailbox *inbox,
3945                          struct mlx4_cmd_mailbox *outbox,
3946                          struct mlx4_cmd_info *cmd)
3947 {
3948         int err;
3949         int qpn = vhcr->in_modifier & 0x7fffff;
3950         struct res_qp *qp;
3951
3952         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3953         if (err)
3954                 return err;
3955         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3956         if (err)
3957                 goto ex_abort;
3958
3959         atomic_dec(&qp->mtt->ref_count);
3960         atomic_dec(&qp->rcq->ref_count);
3961         atomic_dec(&qp->scq->ref_count);
3962         if (qp->srq)
3963                 atomic_dec(&qp->srq->ref_count);
3964         res_end_move(dev, slave, RES_QP, qpn);
3965         return 0;
3966
3967 ex_abort:
3968         res_abort_move(dev, slave, RES_QP, qpn);
3969
3970         return err;
3971 }
3972
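/* Look up a GID on the QP's multicast list; callers hold rqp->mcg_spl. */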
3973 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3974                                 struct res_qp *rqp, u8 *gid)
3975 {
3976         struct res_gid *res;
3977
3978         list_for_each_entry(res, &rqp->mcg_list, list) {
3979                 if (!memcmp(res->gid, gid, 16))
3980                         return res;
3981         }
3982         return NULL;
3983 }
3984
3985 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3986                        u8 *gid, enum mlx4_protocol prot,
3987                        enum mlx4_steer_type steer, u64 reg_id)
3988 {
3989         struct res_gid *res;
3990         int err;
3991
3992         res = kzalloc(sizeof *res, GFP_KERNEL);
3993         if (!res)
3994                 return -ENOMEM;
3995
3996         spin_lock_irq(&rqp->mcg_spl);
3997         if (find_gid(dev, slave, rqp, gid)) {
3998                 kfree(res);
3999                 err = -EEXIST;
4000         } else {
4001                 memcpy(res->gid, gid, 16);
4002                 res->prot = prot;
4003                 res->steer = steer;
4004                 res->reg_id = reg_id;
4005                 list_add_tail(&res->list, &rqp->mcg_list);
4006                 err = 0;
4007         }
4008         spin_unlock_irq(&rqp->mcg_spl);
4009
4010         return err;
4011 }
4012
4013 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4014                        u8 *gid, enum mlx4_protocol prot,
4015                        enum mlx4_steer_type steer, u64 *reg_id)
4016 {
4017         struct res_gid *res;
4018         int err;
4019
4020         spin_lock_irq(&rqp->mcg_spl);
4021         res = find_gid(dev, slave, rqp, gid);
4022         if (!res || res->prot != prot || res->steer != steer)
4023                 err = -EINVAL;
4024         else {
4025                 *reg_id = res->reg_id;
4026                 list_del(&res->list);
4027                 kfree(res);
4028                 err = 0;
4029         }
4030         spin_unlock_irq(&rqp->mcg_spl);
4031
4032         return err;
4033 }
4034
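/* Attach a QP to a multicast group according to the steering mode: under
 * device-managed flow steering the attach becomes a flow rule (after
 * converting the slave's port); under B0 steering the port byte of an
 * Ethernet GID is rewritten before using the common attach path.
 */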
4035 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4036                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4037                      enum mlx4_steer_type type, u64 *reg_id)
4038 {
4039         switch (dev->caps.steering_mode) {
4040         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4041                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4042                 if (port < 0)
4043                         return port;
4044                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4045                                                 block_loopback, prot,
4046                                                 reg_id);
4047         }
4048         case MLX4_STEERING_MODE_B0:
4049                 if (prot == MLX4_PROT_ETH) {
4050                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4051                         if (port < 0)
4052                                 return port;
4053                         gid[5] = port;
4054                 }
4055                 return mlx4_qp_attach_common(dev, qp, gid,
4056                                             block_loopback, prot, type);
4057         default:
4058                 return -EINVAL;
4059         }
4060 }
4061
4062 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4063                      u8 gid[16], enum mlx4_protocol prot,
4064                      enum mlx4_steer_type type, u64 reg_id)
4065 {
4066         switch (dev->caps.steering_mode) {
4067         case MLX4_STEERING_MODE_DEVICE_MANAGED:
4068                 return mlx4_flow_detach(dev, reg_id);
4069         case MLX4_STEERING_MODE_B0:
4070                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4071         default:
4072                 return -EINVAL;
4073         }
4074 }
4075
4076 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4077                             u8 *gid, enum mlx4_protocol prot)
4078 {
4079         int real_port;
4080
4081         if (prot != MLX4_PROT_ETH)
4082                 return 0;
4083
4084         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4085             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4086                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4087                 if (real_port < 0)
4088                         return -EINVAL;
4089                 gid[5] = real_port;
4090         }
4091
4092         return 0;
4093 }
4094
4095 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4096                                struct mlx4_vhcr *vhcr,
4097                                struct mlx4_cmd_mailbox *inbox,
4098                                struct mlx4_cmd_mailbox *outbox,
4099                                struct mlx4_cmd_info *cmd)
4100 {
4101         struct mlx4_qp qp; /* dummy for calling attach/detach */
4102         u8 *gid = inbox->buf;
4103         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4104         int err;
4105         int qpn;
4106         struct res_qp *rqp;
4107         u64 reg_id = 0;
4108         int attach = vhcr->op_modifier;
4109         int block_loopback = vhcr->in_modifier >> 31;
4110         u8 steer_type_mask = 2;
4111         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4112
4113         qpn = vhcr->in_modifier & 0xffffff;
4114         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4115         if (err)
4116                 return err;
4117
4118         qp.qpn = qpn;
4119         if (attach) {
4120                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4121                                 type, &reg_id);
4122                 if (err) {
4123                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
4124                         goto ex_put;
4125                 }
4126                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4127                 if (err)
4128                         goto ex_detach;
4129         } else {
4130                 err = mlx4_adjust_port(dev, slave, gid, prot);
4131                 if (err)
4132                         goto ex_put;
4133
4134                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4135                 if (err)
4136                         goto ex_put;
4137
4138                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4139                 if (err)
4140                         pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
4141                                qpn, reg_id);
4142         }
4143         put_res(dev, slave, qpn, RES_QP);
4144         return err;
4145
4146 ex_detach:
4147         qp_detach(dev, &qp, gid, prot, type, reg_id);
4148 ex_put:
4149         put_res(dev, slave, qpn, RES_QP);
4150         return err;
4151 }
4152
4153 /*
4154  * MAC validation for Flow Steering rules.
4155  * A VF can attach rules only with a MAC address that is assigned to it.
4156  */
4157 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4158                                    struct list_head *rlist)
4159 {
4160         struct mac_res *res, *tmp;
4161         __be64 be_mac;
4162
4163         /* make sure it isn't a multicast or broadcast MAC */
4164         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4165             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4166                 list_for_each_entry_safe(res, tmp, rlist, list) {
4167                         be_mac = cpu_to_be64(res->mac << 16);
4168                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4169                                 return 0;
4170                 }
4171                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4172                        eth_header->eth.dst_mac, slave);
4173                 return -EINVAL;
4174         }
4175         return 0;
4176 }
4177
4178 /*
4179  * If the rule has no eth header, insert an eth header carrying a MAC
4180  * address assigned to the VF.
4181  */
4182 static int add_eth_header(struct mlx4_dev *dev, int slave,
4183                           struct mlx4_cmd_mailbox *inbox,
4184                           struct list_head *rlist, int header_id)
4185 {
4186         struct mac_res *res, *tmp;
4187         u8 port;
4188         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4189         struct mlx4_net_trans_rule_hw_eth *eth_header;
4190         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4191         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4192         __be64 be_mac = 0;
4193         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4194
4195         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4196         port = ctrl->port;
4197         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4198
4199         /* Make room for the eth header by shifting the existing headers */
4200         switch (header_id) {
4201         case MLX4_NET_TRANS_RULE_ID_IPV4:
4202                 ip_header =
4203                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4204                 memmove(ip_header, eth_header,
4205                         sizeof(*ip_header) + sizeof(*l4_header));
4206                 break;
4207         case MLX4_NET_TRANS_RULE_ID_TCP:
4208         case MLX4_NET_TRANS_RULE_ID_UDP:
4209                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4210                             (eth_header + 1);
4211                 memmove(l4_header, eth_header, sizeof(*l4_header));
4212                 break;
4213         default:
4214                 return -EINVAL;
4215         }
4216         list_for_each_entry_safe(res, tmp, rlist, list) {
4217                 if (port == res->port) {
4218                         be_mac = cpu_to_be64(res->mac << 16);
4219                         break;
4220                 }
4221         }
4222         if (!be_mac) {
4223                 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4224                        port);
4225                 return -EINVAL;
4226         }
4227
4228         memset(eth_header, 0, sizeof(*eth_header));
4229         eth_header->size = sizeof(*eth_header) >> 2;
4230         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4231         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4232         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4233
4234         return 0;
4235
4236 }
4237
4238 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4239         1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4240         1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
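/* Only changes to the source-MAC index and the source-check-loopback bit
 * (the mask above) are allowed through this wrapper; anything else is
 * rejected, and a requested MAC index must map to a MAC registered to the
 * slave on the QP's port.
 */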
4241 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4242                            struct mlx4_vhcr *vhcr,
4243                            struct mlx4_cmd_mailbox *inbox,
4244                            struct mlx4_cmd_mailbox *outbox,
4245                            struct mlx4_cmd_info *cmd_info)
4246 {
4247         int err;
4248         u32 qpn = vhcr->in_modifier & 0xffffff;
4249         struct res_qp *rqp;
4250         u64 mac;
4251         unsigned port;
4252         u64 pri_addr_path_mask;
4253         struct mlx4_update_qp_context *cmd;
4254         int smac_index;
4255
4256         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4257
4258         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4259         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4260             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4261                 return -EPERM;
4262
4263         if ((pri_addr_path_mask &
4264              (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4265                 !(dev->caps.flags2 &
4266                   MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4267                 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4268                           slave);
4269                 return -ENOTSUPP;
4270         }
4271
4272         /* Just change the smac for the QP */
4273         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4274         if (err) {
4275                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4276                 return err;
4277         }
4278
4279         port = (rqp->sched_queue >> 6 & 1) + 1;
4280
4281         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4282                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4283                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4284                                                 smac_index, &mac);
4285
4286                 if (err) {
4287                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4288                                  qpn, smac_index);
4289                         goto err_mac;
4290                 }
4291         }
4292
4293         err = mlx4_cmd(dev, inbox->dma,
4294                        vhcr->in_modifier, 0,
4295                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4296                        MLX4_CMD_NATIVE);
4297         if (err) {
4298                 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4299                 goto err_mac;
4300         }
4301
4302 err_mac:
4303         put_res(dev, slave, qpn, RES_QP);
4304         return err;
4305 }
4306
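/* Total size of a flow-steering attach mailbox: the control segment plus
 * the rule headers chained after it (each header's size field counts
 * 32-bit words).
 */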
4307 static u32 qp_attach_mbox_size(void *mbox)
4308 {
4309         u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4310         struct _rule_hw  *rule_header;
4311
4312         rule_header = (struct _rule_hw *)(mbox + size);
4313
4314         while (rule_header->size) {
4315                 size += rule_header->size * sizeof(u32);
4316                 rule_header += 1;
4317         }
4318         return size;
4319 }
4320
4321 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4322
4323 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4324                                          struct mlx4_vhcr *vhcr,
4325                                          struct mlx4_cmd_mailbox *inbox,
4326                                          struct mlx4_cmd_mailbox *outbox,
4327                                          struct mlx4_cmd_info *cmd)
4328 {
4329
4330         struct mlx4_priv *priv = mlx4_priv(dev);
4331         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4332         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4333         int err;
4334         int qpn;
4335         struct res_qp *rqp;
4336         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4337         struct _rule_hw  *rule_header;
4338         int header_id;
4339         struct res_fs_rule *rrule;
4340         u32 mbox_size;
4341
4342         if (dev->caps.steering_mode !=
4343             MLX4_STEERING_MODE_DEVICE_MANAGED)
4344                 return -EOPNOTSUPP;
4345
4346         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4347         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4348         if (err <= 0)
4349                 return -EINVAL;
4350         ctrl->port = err;
4351         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4352         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4353         if (err) {
4354                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4355                 return err;
4356         }
4357         rule_header = (struct _rule_hw *)(ctrl + 1);
4358         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4359
4360         if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4361                 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4362
4363         switch (header_id) {
4364         case MLX4_NET_TRANS_RULE_ID_ETH:
4365                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4366                         err = -EINVAL;
4367                         goto err_put_qp;
4368                 }
4369                 break;
4370         case MLX4_NET_TRANS_RULE_ID_IB:
4371                 break;
4372         case MLX4_NET_TRANS_RULE_ID_IPV4:
4373         case MLX4_NET_TRANS_RULE_ID_TCP:
4374         case MLX4_NET_TRANS_RULE_ID_UDP:
4375                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4376                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4377                         err = -EINVAL;
4378                         goto err_put_qp;
4379                 }
4380                 vhcr->in_modifier +=
4381                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4382                 break;
4383         default:
4384                 pr_err("Corrupted mailbox\n");
4385                 err = -EINVAL;
4386                 goto err_put_qp;
4387         }
4388
4389         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4390                            vhcr->in_modifier, 0,
4391                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4392                            MLX4_CMD_NATIVE);
4393         if (err)
4394                 goto err_put_qp;
4395
4396
4397         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4398         if (err) {
4399                 mlx4_err(dev, "Failed to add flow steering resources\n");
4400                 goto err_detach;
4401         }
4402
4403         err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4404         if (err)
4405                 goto err_detach;
4406
4407         mbox_size = qp_attach_mbox_size(inbox->buf);
4408         rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4409         if (!rrule->mirr_mbox) {
4410                 err = -ENOMEM;
4411                 goto err_put_rule;
4412         }
4413         rrule->mirr_mbox_size = mbox_size;
4414         rrule->mirr_rule_id = 0;
4415         memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4416
4417         /* the mirror rule is attached on the other port */
4418         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4419         if (ctrl->port == 1)
4420                 ctrl->port = 2;
4421         else
4422                 ctrl->port = 1;
4423
4424         if (mlx4_is_bonded(dev))
4425                 mlx4_do_mirror_rule(dev, rrule);
4426
4427         atomic_inc(&rqp->ref_count);
4428
4429 err_put_rule:
4430         put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4431 err_detach:
4432         /* detach rule on error */
4433         if (err)
4434                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4435                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4436                          MLX4_CMD_NATIVE);
4437 err_put_qp:
4438         put_res(dev, slave, qpn, RES_QP);
4439         return err;
4440 }
4441
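/* Drop the resource-tracker entry for a mirror rule and detach the rule
 * in firmware.
 */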
4442 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4443 {
4444         int err;
4445
4446         err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4447         if (err) {
4448                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4449                 return err;
4450         }
4451
4452         mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4453                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4454         return 0;
4455 }
4456
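/* Handle a slave's request to detach a flow steering rule: mirror copies
 * (rules without a saved attach mailbox) may not be removed directly; for
 * an original rule, tear down its bonded mirror copy if one exists, remove
 * the tracker entry, detach the rule in firmware and drop the owning QP's
 * reference on success.
 */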
4457 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4458                                          struct mlx4_vhcr *vhcr,
4459                                          struct mlx4_cmd_mailbox *inbox,
4460                                          struct mlx4_cmd_mailbox *outbox,
4461                                          struct mlx4_cmd_info *cmd)
4462 {
4463         int err;
4464         struct res_qp *rqp;
4465         struct res_fs_rule *rrule;
4466         u64 mirr_reg_id;
4467         int qpn;
4468
4469         if (dev->caps.steering_mode !=
4470             MLX4_STEERING_MODE_DEVICE_MANAGED)
4471                 return -EOPNOTSUPP;
4472
4473         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4474         if (err)
4475                 return err;
4476
4477         if (!rrule->mirr_mbox) {
4478                 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4479                 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4480                 return -EINVAL;
4481         }
4482         mirr_reg_id = rrule->mirr_rule_id;
4483         kfree(rrule->mirr_mbox);
4484         qpn = rrule->qpn;
4485
4486         /* Release the rule from busy state before removal */
4487         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4488         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4489         if (err)
4490                 return err;
4491
4492         if (mirr_reg_id && mlx4_is_bonded(dev)) {
4493                 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4494                 if (err) {
4495                         mlx4_err(dev, "Failed to get resource of mirror rule\n");
4496                 } else {
4497                         put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4498                         mlx4_undo_mirror_rule(dev, rrule);
4499                 }
4500         }
4501         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4502         if (err) {
4503                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4504                 goto out;
4505         }
4506
4507         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4508                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4509                        MLX4_CMD_NATIVE);
4510         if (!err)
4511                 atomic_dec(&rqp->ref_count);
4512 out:
4513         put_res(dev, slave, qpn, RES_QP);
4514         return err;
4515 }
4516
4517 enum {
4518         BUSY_MAX_RETRIES = 10
4519 };
4520
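/* Verify the counter belongs to the slave and hold it busy while the
 * QUERY_IF_STAT command is forwarded to firmware.
 */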
4521 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4522                                struct mlx4_vhcr *vhcr,
4523                                struct mlx4_cmd_mailbox *inbox,
4524                                struct mlx4_cmd_mailbox *outbox,
4525                                struct mlx4_cmd_info *cmd)
4526 {
4527         int err;
4528         int index = vhcr->in_modifier & 0xffff;
4529
4530         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4531         if (err)
4532                 return err;
4533
4534         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4535         put_res(dev, slave, index, RES_COUNTER);
4536         return err;
4537 }
4538
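/* Detach every multicast/steering group still attached to the QP, via
 * flow detach (device-managed steering) or the common QP detach (B0),
 * and free the tracked gid entries.
 */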
4539 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4540 {
4541         struct res_gid *rgid;
4542         struct res_gid *tmp;
4543         struct mlx4_qp qp; /* dummy for calling attach/detach */
4544
4545         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4546                 switch (dev->caps.steering_mode) {
4547                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4548                         mlx4_flow_detach(dev, rgid->reg_id);
4549                         break;
4550                 case MLX4_STEERING_MODE_B0:
4551                         qp.qpn = rqp->local_qpn;
4552                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4553                                                      rgid->prot, rgid->steer);
4554                         break;
4555                 }
4556                 list_del(&rgid->list);
4557                 kfree(rgid);
4558         }
4559 }
4560
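/* One pass over the slave's resources of @type: mark each resource that is
 * not already busy as busy and flag it for removal; return how many were
 * already held busy elsewhere (optionally logging them).
 */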
4561 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4562                           enum mlx4_resource type, int print)
4563 {
4564         struct mlx4_priv *priv = mlx4_priv(dev);
4565         struct mlx4_resource_tracker *tracker =
4566                 &priv->mfunc.master.res_tracker;
4567         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4568         struct res_common *r;
4569         struct res_common *tmp;
4570         int busy;
4571
4572         busy = 0;
4573         spin_lock_irq(mlx4_tlock(dev));
4574         list_for_each_entry_safe(r, tmp, rlist, list) {
4575                 if (r->owner == slave) {
4576                         if (!r->removing) {
4577                                 if (r->state == RES_ANY_BUSY) {
4578                                         if (print)
4579                                                 mlx4_dbg(dev,
4580                                                          "%s id 0x%llx is busy\n",
4581                                                           resource_str(type),
4582                                                           r->res_id);
4583                                         ++busy;
4584                                 } else {
4585                                         r->from_state = r->state;
4586                                         r->state = RES_ANY_BUSY;
4587                                         r->removing = 1;
4588                                 }
4589                         }
4590                 }
4591         }
4592         spin_unlock_irq(mlx4_tlock(dev));
4593
4594         return busy;
4595 }
4596
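/* Retry _move_all_busy() for up to five seconds until every resource of
 * @type owned by the slave has been claimed; a final pass logs whatever
 * is still busy.
 */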
4597 static int move_all_busy(struct mlx4_dev *dev, int slave,
4598                          enum mlx4_resource type)
4599 {
4600         unsigned long begin;
4601         int busy;
4602
4603         begin = jiffies;
4604         do {
4605                 busy = _move_all_busy(dev, slave, type, 0);
4606                 if (time_after(jiffies, begin + 5 * HZ))
4607                         break;
4608                 if (busy)
4609                         cond_resched();
4610         } while (busy);
4611
4612         if (busy)
4613                 busy = _move_all_busy(dev, slave, type, 1);
4614
4615         return busy;
4616 }
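
/* Unwind each of the slave's QPs from its last known state: move it to
 * reset in HW if needed, drop the CQ/SRQ/MTT references, free its ICM,
 * release the QP range and delete the tracker entry.
 */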
4617 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4618 {
4619         struct mlx4_priv *priv = mlx4_priv(dev);
4620         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4621         struct list_head *qp_list =
4622                 &tracker->slave_list[slave].res_list[RES_QP];
4623         struct res_qp *qp;
4624         struct res_qp *tmp;
4625         int state;
4626         u64 in_param;
4627         int qpn;
4628         int err;
4629
4630         err = move_all_busy(dev, slave, RES_QP);
4631         if (err)
4632                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4633                           slave);
4634
4635         spin_lock_irq(mlx4_tlock(dev));
4636         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4637                 spin_unlock_irq(mlx4_tlock(dev));
4638                 if (qp->com.owner == slave) {
4639                         qpn = qp->com.res_id;
4640                         detach_qp(dev, slave, qp);
4641                         state = qp->com.from_state;
4642                         while (state != 0) {
4643                                 switch (state) {
4644                                 case RES_QP_RESERVED:
4645                                         spin_lock_irq(mlx4_tlock(dev));
4646                                         rb_erase(&qp->com.node,
4647                                                  &tracker->res_tree[RES_QP]);
4648                                         list_del(&qp->com.list);
4649                                         spin_unlock_irq(mlx4_tlock(dev));
4650                                         if (!valid_reserved(dev, slave, qpn)) {
4651                                                 __mlx4_qp_release_range(dev, qpn, 1);
4652                                                 mlx4_release_resource(dev, slave,
4653                                                                       RES_QP, 1, 0);
4654                                         }
4655                                         kfree(qp);
4656                                         state = 0;
4657                                         break;
4658                                 case RES_QP_MAPPED:
4659                                         if (!valid_reserved(dev, slave, qpn))
4660                                                 __mlx4_qp_free_icm(dev, qpn);
4661                                         state = RES_QP_RESERVED;
4662                                         break;
4663                                 case RES_QP_HW:
4664                                         in_param = slave;
4665                                         err = mlx4_cmd(dev, in_param,
4666                                                        qp->local_qpn, 2,
4667                                                        MLX4_CMD_2RST_QP,
4668                                                        MLX4_CMD_TIME_CLASS_A,
4669                                                        MLX4_CMD_NATIVE);
4670                                         if (err)
4671                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4672                                                          slave, qp->local_qpn);
4673                                         atomic_dec(&qp->rcq->ref_count);
4674                                         atomic_dec(&qp->scq->ref_count);
4675                                         atomic_dec(&qp->mtt->ref_count);
4676                                         if (qp->srq)
4677                                                 atomic_dec(&qp->srq->ref_count);
4678                                         state = RES_QP_MAPPED;
4679                                         break;
4680                                 default:
4681                                         state = 0;
4682                                 }
4683                         }
4684                 }
4685                 spin_lock_irq(mlx4_tlock(dev));
4686         }
4687         spin_unlock_irq(mlx4_tlock(dev));
4688 }
4689
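/* Unwind the slave's SRQs: move them back to SW ownership, drop the
 * MTT/CQ references, free the ICM and delete the tracker entries.
 */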
4690 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4691 {
4692         struct mlx4_priv *priv = mlx4_priv(dev);
4693         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4694         struct list_head *srq_list =
4695                 &tracker->slave_list[slave].res_list[RES_SRQ];
4696         struct res_srq *srq;
4697         struct res_srq *tmp;
4698         int state;
4699         u64 in_param;
4700         LIST_HEAD(tlist);
4701         int srqn;
4702         int err;
4703
4704         err = move_all_busy(dev, slave, RES_SRQ);
4705         if (err)
4706                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4707                           slave);
4708
4709         spin_lock_irq(mlx4_tlock(dev));
4710         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4711                 spin_unlock_irq(mlx4_tlock(dev));
4712                 if (srq->com.owner == slave) {
4713                         srqn = srq->com.res_id;
4714                         state = srq->com.from_state;
4715                         while (state != 0) {
4716                                 switch (state) {
4717                                 case RES_SRQ_ALLOCATED:
4718                                         __mlx4_srq_free_icm(dev, srqn);
4719                                         spin_lock_irq(mlx4_tlock(dev));
4720                                         rb_erase(&srq->com.node,
4721                                                  &tracker->res_tree[RES_SRQ]);
4722                                         list_del(&srq->com.list);
4723                                         spin_unlock_irq(mlx4_tlock(dev));
4724                                         mlx4_release_resource(dev, slave,
4725                                                               RES_SRQ, 1, 0);
4726                                         kfree(srq);
4727                                         state = 0;
4728                                         break;
4729
4730                                 case RES_SRQ_HW:
4731                                         in_param = slave;
4732                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4733                                                        MLX4_CMD_HW2SW_SRQ,
4734                                                        MLX4_CMD_TIME_CLASS_A,
4735                                                        MLX4_CMD_NATIVE);
4736                                         if (err)
4737                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4738                                                          slave, srqn);
4739
4740                                         atomic_dec(&srq->mtt->ref_count);
4741                                         if (srq->cq)
4742                                                 atomic_dec(&srq->cq->ref_count);
4743                                         state = RES_SRQ_ALLOCATED;
4744                                         break;
4745
4746                                 default:
4747                                         state = 0;
4748                                 }
4749                         }
4750                 }
4751                 spin_lock_irq(mlx4_tlock(dev));
4752         }
4753         spin_unlock_irq(mlx4_tlock(dev));
4754 }
4755
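/* Unwind the slave's CQs (skipping any still referenced by a QP): move
 * them back to SW ownership, drop the MTT reference, free the ICM and
 * delete the tracker entries.
 */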
4756 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4757 {
4758         struct mlx4_priv *priv = mlx4_priv(dev);
4759         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4760         struct list_head *cq_list =
4761                 &tracker->slave_list[slave].res_list[RES_CQ];
4762         struct res_cq *cq;
4763         struct res_cq *tmp;
4764         int state;
4765         u64 in_param;
4766         LIST_HEAD(tlist);
4767         int cqn;
4768         int err;
4769
4770         err = move_all_busy(dev, slave, RES_CQ);
4771         if (err)
4772                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4773                           slave);
4774
4775         spin_lock_irq(mlx4_tlock(dev));
4776         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4777                 spin_unlock_irq(mlx4_tlock(dev));
4778                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4779                         cqn = cq->com.res_id;
4780                         state = cq->com.from_state;
4781                         while (state != 0) {
4782                                 switch (state) {
4783                                 case RES_CQ_ALLOCATED:
4784                                         __mlx4_cq_free_icm(dev, cqn);
4785                                         spin_lock_irq(mlx4_tlock(dev));
4786                                         rb_erase(&cq->com.node,
4787                                                  &tracker->res_tree[RES_CQ]);
4788                                         list_del(&cq->com.list);
4789                                         spin_unlock_irq(mlx4_tlock(dev));
4790                                         mlx4_release_resource(dev, slave,
4791                                                               RES_CQ, 1, 0);
4792                                         kfree(cq);
4793                                         state = 0;
4794                                         break;
4795
4796                                 case RES_CQ_HW:
4797                                         in_param = slave;
4798                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4799                                                        MLX4_CMD_HW2SW_CQ,
4800                                                        MLX4_CMD_TIME_CLASS_A,
4801                                                        MLX4_CMD_NATIVE);
4802                                         if (err)
4803                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4804                                                          slave, cqn);
4805                                         atomic_dec(&cq->mtt->ref_count);
4806                                         state = RES_CQ_ALLOCATED;
4807                                         break;
4808
4809                                 default:
4810                                         state = 0;
4811                                 }
4812                         }
4813                 }
4814                 spin_lock_irq(mlx4_tlock(dev));
4815         }
4816         spin_unlock_irq(mlx4_tlock(dev));
4817 }
4818
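/* Unwind the slave's MPTs: move them back to SW ownership, drop the MTT
 * reference, free the ICM, release the key and delete the tracker entries.
 */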
4819 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4820 {
4821         struct mlx4_priv *priv = mlx4_priv(dev);
4822         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4823         struct list_head *mpt_list =
4824                 &tracker->slave_list[slave].res_list[RES_MPT];
4825         struct res_mpt *mpt;
4826         struct res_mpt *tmp;
4827         int state;
4828         u64 in_param;
4829         LIST_HEAD(tlist);
4830         int mptn;
4831         int err;
4832
4833         err = move_all_busy(dev, slave, RES_MPT);
4834         if (err)
4835                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4836                           slave);
4837
4838         spin_lock_irq(mlx4_tlock(dev));
4839         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4840                 spin_unlock_irq(mlx4_tlock(dev));
4841                 if (mpt->com.owner == slave) {
4842                         mptn = mpt->com.res_id;
4843                         state = mpt->com.from_state;
4844                         while (state != 0) {
4845                                 switch (state) {
4846                                 case RES_MPT_RESERVED:
4847                                         __mlx4_mpt_release(dev, mpt->key);
4848                                         spin_lock_irq(mlx4_tlock(dev));
4849                                         rb_erase(&mpt->com.node,
4850                                                  &tracker->res_tree[RES_MPT]);
4851                                         list_del(&mpt->com.list);
4852                                         spin_unlock_irq(mlx4_tlock(dev));
4853                                         mlx4_release_resource(dev, slave,
4854                                                               RES_MPT, 1, 0);
4855                                         kfree(mpt);
4856                                         state = 0;
4857                                         break;
4858
4859                                 case RES_MPT_MAPPED:
4860                                         __mlx4_mpt_free_icm(dev, mpt->key);
4861                                         state = RES_MPT_RESERVED;
4862                                         break;
4863
4864                                 case RES_MPT_HW:
4865                                         in_param = slave;
4866                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4867                                                      MLX4_CMD_HW2SW_MPT,
4868                                                      MLX4_CMD_TIME_CLASS_A,
4869                                                      MLX4_CMD_NATIVE);
4870                                         if (err)
4871                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4872                                                          slave, mptn);
4873                                         if (mpt->mtt)
4874                                                 atomic_dec(&mpt->mtt->ref_count);
4875                                         state = RES_MPT_MAPPED;
4876                                         break;
4877                                 default:
4878                                         state = 0;
4879                                 }
4880                         }
4881                 }
4882                 spin_lock_irq(mlx4_tlock(dev));
4883         }
4884         spin_unlock_irq(mlx4_tlock(dev));
4885 }
4886
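/* Free every MTT range the slave still owns and delete its tracker entry. */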
4887 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4888 {
4889         struct mlx4_priv *priv = mlx4_priv(dev);
4890         struct mlx4_resource_tracker *tracker =
4891                 &priv->mfunc.master.res_tracker;
4892         struct list_head *mtt_list =
4893                 &tracker->slave_list[slave].res_list[RES_MTT];
4894         struct res_mtt *mtt;
4895         struct res_mtt *tmp;
4896         int state;
4897         LIST_HEAD(tlist);
4898         int base;
4899         int err;
4900
4901         err = move_all_busy(dev, slave, RES_MTT);
4902         if (err)
4903                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4904                           slave);
4905
4906         spin_lock_irq(mlx4_tlock(dev));
4907         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4908                 spin_unlock_irq(mlx4_tlock(dev));
4909                 if (mtt->com.owner == slave) {
4910                         base = mtt->com.res_id;
4911                         state = mtt->com.from_state;
4912                         while (state != 0) {
4913                                 switch (state) {
4914                                 case RES_MTT_ALLOCATED:
4915                                         __mlx4_free_mtt_range(dev, base,
4916                                                               mtt->order);
4917                                         spin_lock_irq(mlx4_tlock(dev));
4918                                         rb_erase(&mtt->com.node,
4919                                                  &tracker->res_tree[RES_MTT]);
4920                                         list_del(&mtt->com.list);
4921                                         spin_unlock_irq(mlx4_tlock(dev));
4922                                         mlx4_release_resource(dev, slave, RES_MTT,
4923                                                               1 << mtt->order, 0);
4924                                         kfree(mtt);
4925                                         state = 0;
4926                                         break;
4927
4928                                 default:
4929                                         state = 0;
4930                                 }
4931                         }
4932                 }
4933                 spin_lock_irq(mlx4_tlock(dev));
4934         }
4935         spin_unlock_irq(mlx4_tlock(dev));
4936 }
4937
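/* Replay the rule's saved attach mailbox to register its mirror copy in
 * firmware, track the copy under the same owner and QP, and record the
 * copy's registration id in the original rule.  The copy keeps no mailbox
 * of its own, so it cannot be mirrored again or detached directly.
 */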
4938 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4939 {
4940         struct mlx4_cmd_mailbox *mailbox;
4941         int err;
4942         struct res_fs_rule *mirr_rule;
4943         u64 reg_id;
4944
4945         mailbox = mlx4_alloc_cmd_mailbox(dev);
4946         if (IS_ERR(mailbox))
4947                 return PTR_ERR(mailbox);
4948
4949         if (!fs_rule->mirr_mbox) {
4950                 mlx4_err(dev, "rule mirroring mailbox is null\n");
4951                 mlx4_free_cmd_mailbox(dev, mailbox);
4952                 return -EINVAL;
4953         }
4954         memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4955         err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4956                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4957                            MLX4_CMD_NATIVE);
4958         mlx4_free_cmd_mailbox(dev, mailbox);
4959
4960         if (err)
4961                 goto err;
4962
4963         err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4964         if (err)
4965                 goto err_detach;
4966
4967         err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4968         if (err)
4969                 goto err_rem;
4970
4971         fs_rule->mirr_rule_id = reg_id;
4972         mirr_rule->mirr_rule_id = 0;
4973         mirr_rule->mirr_mbox_size = 0;
4974         mirr_rule->mirr_mbox = NULL;
4975         put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
4976
4977         return 0;
4978 err_rem:
4979         rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
4980 err_detach:
4981         mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4982                  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4983 err:
4984         return err;
4985 }
4986
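/* On bond, create a mirror copy for every tracked rule that still has a
 * saved attach mailbox; on unbond, remove the mirror copies (the rules
 * without one).  Returns nonzero if mirroring any rule failed.
 */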
4987 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
4988 {
4989         struct mlx4_priv *priv = mlx4_priv(dev);
4990         struct mlx4_resource_tracker *tracker =
4991                 &priv->mfunc.master.res_tracker;
4992         struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
4993         struct rb_node *p;
4994         struct res_fs_rule *fs_rule;
4995         int err = 0;
4996         LIST_HEAD(mirr_list);
4997
4998         for (p = rb_first(root); p; p = rb_next(p)) {
4999                 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5000                 if ((bond && fs_rule->mirr_mbox_size) ||
5001                     (!bond && !fs_rule->mirr_mbox_size))
5002                         list_add_tail(&fs_rule->mirr_list, &mirr_list);
5003         }
5004
5005         list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5006                 if (bond)
5007                         err += mlx4_do_mirror_rule(dev, fs_rule);
5008                 else
5009                         err += mlx4_undo_mirror_rule(dev, fs_rule);
5010         }
5011         return err;
5012 }
5013
5014 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5015 {
5016         return mlx4_mirror_fs_rules(dev, true);
5017 }
5018
5019 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5020 {
5021         return mlx4_mirror_fs_rules(dev, false);
5022 }
5023
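/* Detach and untrack every flow steering rule the slave still owns,
 * freeing any saved mirror mailbox along the way.
 */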
5024 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5025 {
5026         struct mlx4_priv *priv = mlx4_priv(dev);
5027         struct mlx4_resource_tracker *tracker =
5028                 &priv->mfunc.master.res_tracker;
5029         struct list_head *fs_rule_list =
5030                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5031         struct res_fs_rule *fs_rule;
5032         struct res_fs_rule *tmp;
5033         int state;
5034         u64 base;
5035         int err;
5036
5037         err = move_all_busy(dev, slave, RES_FS_RULE);
5038         if (err)
5039                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5040                           slave);
5041
5042         spin_lock_irq(mlx4_tlock(dev));
5043         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5044                 spin_unlock_irq(mlx4_tlock(dev));
5045                 if (fs_rule->com.owner == slave) {
5046                         base = fs_rule->com.res_id;
5047                         state = fs_rule->com.from_state;
5048                         while (state != 0) {
5049                                 switch (state) {
5050                                 case RES_FS_RULE_ALLOCATED:
5051                                         /* detach rule */
5052                                         err = mlx4_cmd(dev, base, 0, 0,
5053                                                        MLX4_QP_FLOW_STEERING_DETACH,
5054                                                        MLX4_CMD_TIME_CLASS_A,
5055                                                        MLX4_CMD_NATIVE);
5056
5057                                         spin_lock_irq(mlx4_tlock(dev));
5058                                         rb_erase(&fs_rule->com.node,
5059                                                  &tracker->res_tree[RES_FS_RULE]);
5060                                         list_del(&fs_rule->com.list);
5061                                         spin_unlock_irq(mlx4_tlock(dev));
5062                                         kfree(fs_rule->mirr_mbox);
5063                                         kfree(fs_rule);
5064                                         state = 0;
5065                                         break;
5066
5067                                 default:
5068                                         state = 0;
5069                                 }
5070                         }
5071                 }
5072                 spin_lock_irq(mlx4_tlock(dev));
5073         }
5074         spin_unlock_irq(mlx4_tlock(dev));
5075 }
5076
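/* Unwind the slave's EQs: move them back to SW ownership, drop the MTT
 * reference and delete the tracker entries.
 */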
5077 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5078 {
5079         struct mlx4_priv *priv = mlx4_priv(dev);
5080         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5081         struct list_head *eq_list =
5082                 &tracker->slave_list[slave].res_list[RES_EQ];
5083         struct res_eq *eq;
5084         struct res_eq *tmp;
5085         int err;
5086         int state;
5087         LIST_HEAD(tlist);
5088         int eqn;
5089
5090         err = move_all_busy(dev, slave, RES_EQ);
5091         if (err)
5092                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5093                           slave);
5094
5095         spin_lock_irq(mlx4_tlock(dev));
5096         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5097                 spin_unlock_irq(mlx4_tlock(dev));
5098                 if (eq->com.owner == slave) {
5099                         eqn = eq->com.res_id;
5100                         state = eq->com.from_state;
5101                         while (state != 0) {
5102                                 switch (state) {
5103                                 case RES_EQ_RESERVED:
5104                                         spin_lock_irq(mlx4_tlock(dev));
5105                                         rb_erase(&eq->com.node,
5106                                                  &tracker->res_tree[RES_EQ]);
5107                                         list_del(&eq->com.list);
5108                                         spin_unlock_irq(mlx4_tlock(dev));
5109                                         kfree(eq);
5110                                         state = 0;
5111                                         break;
5112
5113                                 case RES_EQ_HW:
5114                                         err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5115                                                        1, MLX4_CMD_HW2SW_EQ,
5116                                                        MLX4_CMD_TIME_CLASS_A,
5117                                                        MLX4_CMD_NATIVE);
5118                                         if (err)
5119                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5120                                                          slave, eqn & 0x3ff);
5121                                         atomic_dec(&eq->mtt->ref_count);
5122                                         state = RES_EQ_RESERVED;
5123                                         break;
5124
5125                                 default:
5126                                         state = 0;
5127                                 }
5128                         }
5129                 }
5130                 spin_lock_irq(mlx4_tlock(dev));
5131         }
5132         spin_unlock_irq(mlx4_tlock(dev));
5133 }
5134
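/* Untrack all of the slave's counters under the tracker lock, then free
 * them and return their quota outside of it.
 */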
5135 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5136 {
5137         struct mlx4_priv *priv = mlx4_priv(dev);
5138         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5139         struct list_head *counter_list =
5140                 &tracker->slave_list[slave].res_list[RES_COUNTER];
5141         struct res_counter *counter;
5142         struct res_counter *tmp;
5143         int err;
5144         int *counters_arr = NULL;
5145         int i, j;
5146
5147         err = move_all_busy(dev, slave, RES_COUNTER);
5148         if (err)
5149                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5150                           slave);
5151
5152         counters_arr = kmalloc_array(dev->caps.max_counters,
5153                                      sizeof(*counters_arr), GFP_KERNEL);
5154         if (!counters_arr)
5155                 return;
5156
5157         do {
5158                 i = 0;
5159                 j = 0;
5160                 spin_lock_irq(mlx4_tlock(dev));
5161                 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5162                         if (counter->com.owner == slave) {
5163                                 counters_arr[i++] = counter->com.res_id;
5164                                 rb_erase(&counter->com.node,
5165                                          &tracker->res_tree[RES_COUNTER]);
5166                                 list_del(&counter->com.list);
5167                                 kfree(counter);
5168                         }
5169                 }
5170                 spin_unlock_irq(mlx4_tlock(dev));
5171
5172                 while (j < i) {
5173                         __mlx4_counter_free(dev, counters_arr[j++]);
5174                         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5175                 }
5176         } while (i);
5177
5178         kfree(counters_arr);
5179 }
5180
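/* Untrack and free every XRC domain the slave still owns. */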
5181 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5182 {
5183         struct mlx4_priv *priv = mlx4_priv(dev);
5184         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5185         struct list_head *xrcdn_list =
5186                 &tracker->slave_list[slave].res_list[RES_XRCD];
5187         struct res_xrcdn *xrcd;
5188         struct res_xrcdn *tmp;
5189         int err;
5190         int xrcdn;
5191
5192         err = move_all_busy(dev, slave, RES_XRCD);
5193         if (err)
5194                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5195                           slave);
5196
5197         spin_lock_irq(mlx4_tlock(dev));
5198         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5199                 if (xrcd->com.owner == slave) {
5200                         xrcdn = xrcd->com.res_id;
5201                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5202                         list_del(&xrcd->com.list);
5203                         kfree(xrcd);
5204                         __mlx4_xrcd_free(dev, xrcdn);
5205                 }
5206         }
5207         spin_unlock_irq(mlx4_tlock(dev));
5208 }
5209
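/* Release every resource type the slave still owns, in dependency order,
 * under the slave's resource-tracker mutex.
 */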
5210 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5211 {
5212         struct mlx4_priv *priv = mlx4_priv(dev);
5213         mlx4_reset_roce_gids(dev, slave);
5214         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5215         rem_slave_vlans(dev, slave);
5216         rem_slave_macs(dev, slave);
5217         rem_slave_fs_rule(dev, slave);
5218         rem_slave_qps(dev, slave);
5219         rem_slave_srqs(dev, slave);
5220         rem_slave_cqs(dev, slave);
5221         rem_slave_mrs(dev, slave);
5222         rem_slave_eqs(dev, slave);
5223         rem_slave_mtts(dev, slave);
5224         rem_slave_counters(dev, slave);
5225         rem_slave_xrcdns(dev, slave);
5226         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5227 }
5228
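/* Add a QoS vport change to the UPDATE_QP context and mask. */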
5229 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5230                            struct mlx4_vf_immed_vlan_work *work)
5231 {
5232         ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5233         ctx->qp_context.qos_vport = work->qos_vport;
5234 }
5235
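/* Deferred work that pushes an immediate VST VLAN/QoS change to the
 * slave's Ethernet QPs on the given port via UPDATE_QP (skipping reserved
 * and RSS QPs), building the vlan_control policy from the requested VLAN
 * protocol and link state, and unregistering the previous VLAN if every
 * update succeeded.
 */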
5236 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5237 {
5238         struct mlx4_vf_immed_vlan_work *work =
5239                 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5240         struct mlx4_cmd_mailbox *mailbox;
5241         struct mlx4_update_qp_context *upd_context;
5242         struct mlx4_dev *dev = &work->priv->dev;
5243         struct mlx4_resource_tracker *tracker =
5244                 &work->priv->mfunc.master.res_tracker;
5245         struct list_head *qp_list =
5246                 &tracker->slave_list[work->slave].res_list[RES_QP];
5247         struct res_qp *qp;
5248         struct res_qp *tmp;
5249         u64 qp_path_mask_vlan_ctrl =
5250                        ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5251                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5252                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5253                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5254                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5255                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5256
5257         u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5258                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5259                        (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5260                        (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5261                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5262                        (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5263                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5264                        (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5265
5266         int err;
5267         int port, errors = 0;
5268         u8 vlan_control;
5269
5270         if (mlx4_is_slave(dev)) {
5271                 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5272                           work->slave);
5273                 goto out;
5274         }
5275
5276         mailbox = mlx4_alloc_cmd_mailbox(dev);
5277         if (IS_ERR(mailbox))
5278                 goto out;
5279         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5280                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5281                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5282                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5283                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5284                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5285                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5286         else if (!work->vlan_id)
5287                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5288                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5289         else if (work->vlan_proto == htons(ETH_P_8021AD))
5290                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5291                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5292                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5293                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5294         else  /* vst 802.1Q */
5295                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5296                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5297                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5298
5299         upd_context = mailbox->buf;
5300         upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5301
5302         spin_lock_irq(mlx4_tlock(dev));
5303         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5304                 spin_unlock_irq(mlx4_tlock(dev));
5305                 if (qp->com.owner == work->slave) {
5306                         if (qp->com.from_state != RES_QP_HW ||
5307                             !qp->sched_queue ||  /* no INIT2RTR trans yet */
5308                             mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5309                             qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5310                                 spin_lock_irq(mlx4_tlock(dev));
5311                                 continue;
5312                         }
5313                         port = (qp->sched_queue >> 6 & 1) + 1;
5314                         if (port != work->port) {
5315                                 spin_lock_irq(mlx4_tlock(dev));
5316                                 continue;
5317                         }
5318                         if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5319                                 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5320                         else
5321                                 upd_context->primary_addr_path_mask =
5322                                         cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5323                         if (work->vlan_id == MLX4_VGT) {
5324                                 upd_context->qp_context.param3 = qp->param3;
5325                                 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5326                                 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5327                                 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5328                                 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5329                                 upd_context->qp_context.pri_path.feup = qp->feup;
5330                                 upd_context->qp_context.pri_path.sched_queue =
5331                                         qp->sched_queue;
5332                         } else {
5333                                 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5334                                 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5335                                 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5336                                 upd_context->qp_context.pri_path.fvl_rx =
5337                                         qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5338                                 upd_context->qp_context.pri_path.fl =
5339                                         qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5340                                 if (work->vlan_proto == htons(ETH_P_8021AD))
5341                                         upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5342                                 else
5343                                         upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5344                                 upd_context->qp_context.pri_path.feup =
5345                                         qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5346                                 upd_context->qp_context.pri_path.sched_queue =
5347                                         qp->sched_queue & 0xC7;
5348                                 upd_context->qp_context.pri_path.sched_queue |=
5349                                         ((work->qos & 0x7) << 3);
5350
5351                                 if (dev->caps.flags2 &
5352                                     MLX4_DEV_CAP_FLAG2_QOS_VPP)
5353                                         update_qos_vpp(upd_context, work);
5354                         }
5355
5356                         err = mlx4_cmd(dev, mailbox->dma,
5357                                        qp->local_qpn & 0xffffff,
5358                                        0, MLX4_CMD_UPDATE_QP,
5359                                        MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5360                         if (err) {
5361                                 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5362                                           work->slave, port, qp->local_qpn, err);
5363                                 errors++;
5364                         }
5365                 }
5366                 spin_lock_irq(mlx4_tlock(dev));
5367         }
5368         spin_unlock_irq(mlx4_tlock(dev));
5369         mlx4_free_cmd_mailbox(dev, mailbox);
5370
5371         if (errors)
5372                 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5373                          errors, work->slave, work->port);
5374
5375         /* unregister previous vlan_id if needed and we had no errors
5376          * while updating the QPs
5377          */
5378         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5379             NO_INDX != work->orig_vlan_ix)
5380                 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5381                                        work->orig_vlan_id);
5382 out:
5383         kfree(work);
5384         return;
5385 }