2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
49 #include "mlx4_stats.h"
51 #define MLX4_MAC_VALID (1ull << 63)
52 #define MLX4_PF_COUNTERS_PER_PORT 2
53 #define MLX4_VF_COUNTERS_PER_PORT 1
56 struct list_head list;
64 struct list_head list;
72 struct list_head list;
87 struct list_head list;
89 enum mlx4_protocol prot;
90 enum mlx4_steer_type steer;
95 RES_QP_BUSY = RES_ANY_BUSY,
97 /* QP number was allocated */
100 /* ICM memory for QP context was mapped */
103 /* QP is in hw ownership */
108 struct res_common com;
113 struct list_head mcg_list;
118 /* saved qp params before VST enforcement in order to restore on VGT */
128 enum res_mtt_states {
129 RES_MTT_BUSY = RES_ANY_BUSY,
133 static inline const char *mtt_states_str(enum res_mtt_states state)
136 case RES_MTT_BUSY: return "RES_MTT_BUSY";
137 case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
138 default: return "Unknown";
143 struct res_common com;
148 enum res_mpt_states {
149 RES_MPT_BUSY = RES_ANY_BUSY,
156 struct res_common com;
162 RES_EQ_BUSY = RES_ANY_BUSY,
168 struct res_common com;
173 RES_CQ_BUSY = RES_ANY_BUSY,
179 struct res_common com;
184 enum res_srq_states {
185 RES_SRQ_BUSY = RES_ANY_BUSY,
191 struct res_common com;
197 enum res_counter_states {
198 RES_COUNTER_BUSY = RES_ANY_BUSY,
199 RES_COUNTER_ALLOCATED,
203 struct res_common com;
207 enum res_xrcdn_states {
208 RES_XRCD_BUSY = RES_ANY_BUSY,
213 struct res_common com;
217 enum res_fs_rule_states {
218 RES_FS_RULE_BUSY = RES_ANY_BUSY,
219 RES_FS_RULE_ALLOCATED,
223 struct res_common com;
225 /* VF DMFS mbox with port flipped */
227 /* > 0 --> apply mirror when getting into HA mode */
228 /* = 0 --> un-apply mirror when getting out of HA mode */
230 struct list_head mirr_list;
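/*
 * The tracker keeps two views of every resource: a per-type red-black tree
 * (res_tree[type], keyed by res_id) for fast lookup, and a per-slave list
 * (slave_list[slave].res_list[type]) so that everything owned by one
 * function can be walked and released when that function goes away.
 */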
234 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
236 struct rb_node *node = root->rb_node;
239 struct res_common *res = container_of(node, struct res_common,
242 if (res_id < res->res_id)
243 node = node->rb_left;
244 else if (res_id > res->res_id)
245 node = node->rb_right;
252 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
254 struct rb_node **new = &(root->rb_node), *parent = NULL;
256 /* Figure out where to put new node */
258 struct res_common *this = container_of(*new, struct res_common,
262 if (res->res_id < this->res_id)
263 new = &((*new)->rb_left);
264 else if (res->res_id > this->res_id)
265 new = &((*new)->rb_right);
270 /* Add new node and rebalance tree. */
271 rb_link_node(&res->node, parent, new);
272 rb_insert_color(&res->node, root);
287 static const char *resource_str(enum mlx4_resource rt)
290 case RES_QP: return "RES_QP";
291 case RES_CQ: return "RES_CQ";
292 case RES_SRQ: return "RES_SRQ";
293 case RES_MPT: return "RES_MPT";
294 case RES_MTT: return "RES_MTT";
295 case RES_MAC: return "RES_MAC";
296 case RES_VLAN: return "RES_VLAN";
297 case RES_EQ: return "RES_EQ";
298 case RES_COUNTER: return "RES_COUNTER";
299 case RES_FS_RULE: return "RES_FS_RULE";
300 case RES_XRCD: return "RES_XRCD";
301 default: return "Unknown resource type !!!";
305 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
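/*
 * mlx4_grant_resource() charges @count instances of @res_type to @slave.
 * Two limits are enforced below: the per-slave quota[] ceiling, and the
 * shared free pool with a reserved floor that protects every slave's
 * guaranteed[] share.  Only the part of a request that exceeds what is still
 * guaranteed to the slave ("from_free") must fit above the reserved floor;
 * the rest ("from_rsvd") is debited from the reserved pool.  MAC and VLAN
 * resources are accounted per port (port > 0), all others device-wide.
 *
 * Illustrative numbers only: quota = 10, guaranteed = 4, allocated = 3,
 * count = 3  ->  from_free = 2, from_rsvd = 1.
 */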
306 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
307 enum mlx4_resource res_type, int count,
310 struct mlx4_priv *priv = mlx4_priv(dev);
311 struct resource_allocator *res_alloc =
312 &priv->mfunc.master.res_tracker.res_alloc[res_type];
314 int allocated, free, reserved, guaranteed, from_free;
317 if (slave > dev->persist->num_vfs)
320 spin_lock(&res_alloc->alloc_lock);
321 allocated = (port > 0) ?
322 res_alloc->allocated[(port - 1) *
323 (dev->persist->num_vfs + 1) + slave] :
324 res_alloc->allocated[slave];
325 free = (port > 0) ? res_alloc->res_port_free[port - 1] :
327 reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
328 res_alloc->res_reserved;
329 guaranteed = res_alloc->guaranteed[slave];
331 if (allocated + count > res_alloc->quota[slave]) {
332 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
333 slave, port, resource_str(res_type), count,
334 allocated, res_alloc->quota[slave]);
338 if (allocated + count <= guaranteed) {
342 /* portion may need to be obtained from free area */
343 if (guaranteed - allocated > 0)
344 from_free = count - (guaranteed - allocated);
348 from_rsvd = count - from_free;
350 if (free - from_free >= reserved)
353 mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
354 slave, port, resource_str(res_type), free,
355 from_free, reserved);
359 /* grant the request */
361 res_alloc->allocated[(port - 1) *
362 (dev->persist->num_vfs + 1) + slave] += count;
363 res_alloc->res_port_free[port - 1] -= count;
364 res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
366 res_alloc->allocated[slave] += count;
367 res_alloc->res_free -= count;
368 res_alloc->res_reserved -= from_rsvd;
373 spin_unlock(&res_alloc->alloc_lock);
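/*
 * mlx4_release_resource() is the inverse of mlx4_grant_resource(): @count
 * instances go back to the free pool, and whatever portion of the release
 * drops the slave back under its guaranteed[] share ("from_rsvd") is also
 * credited back to the reserved pool.
 */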
377 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
378 enum mlx4_resource res_type, int count,
381 struct mlx4_priv *priv = mlx4_priv(dev);
382 struct resource_allocator *res_alloc =
383 &priv->mfunc.master.res_tracker.res_alloc[res_type];
384 int allocated, guaranteed, from_rsvd;
386 if (slave > dev->persist->num_vfs)
389 spin_lock(&res_alloc->alloc_lock);
391 allocated = (port > 0) ?
392 res_alloc->allocated[(port - 1) *
393 (dev->persist->num_vfs + 1) + slave] :
394 res_alloc->allocated[slave];
395 guaranteed = res_alloc->guaranteed[slave];
397 if (allocated - count >= guaranteed) {
400 /* portion may need to be returned to reserved area */
401 if (allocated - guaranteed > 0)
402 from_rsvd = count - (allocated - guaranteed);
408 res_alloc->allocated[(port - 1) *
409 (dev->persist->num_vfs + 1) + slave] -= count;
410 res_alloc->res_port_free[port - 1] += count;
411 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
413 res_alloc->allocated[slave] -= count;
414 res_alloc->res_free += count;
415 res_alloc->res_reserved += from_rsvd;
418 spin_unlock(&res_alloc->alloc_lock);
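/*
 * Default quota policy set up below: every function (PF and VFs alike) is
 * guaranteed num_instances / (2 * (num_vfs + 1)) and may allocate up to half
 * of num_instances on top of that guarantee; for MTTs the PF additionally
 * absorbs the firmware-reserved entries into its free/guaranteed/quota
 * figures.
 */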
422 static inline void initialize_res_quotas(struct mlx4_dev *dev,
423 struct resource_allocator *res_alloc,
424 enum mlx4_resource res_type,
425 int vf, int num_instances)
427 res_alloc->guaranteed[vf] = num_instances /
428 (2 * (dev->persist->num_vfs + 1));
429 res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
430 if (vf == mlx4_master_func_num(dev)) {
431 res_alloc->res_free = num_instances;
432 if (res_type == RES_MTT) {
433 /* reserved mtts will be taken out of the PF allocation */
434 res_alloc->res_free += dev->caps.reserved_mtts;
435 res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
436 res_alloc->quota[vf] += dev->caps.reserved_mtts;
441 void mlx4_init_quotas(struct mlx4_dev *dev)
443 struct mlx4_priv *priv = mlx4_priv(dev);
446 /* quotas for VFs are initialized in mlx4_slave_cap */
447 if (mlx4_is_slave(dev))
450 if (!mlx4_is_mfunc(dev)) {
451 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
452 mlx4_num_reserved_sqps(dev);
453 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
454 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
455 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
456 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
460 pf = mlx4_master_func_num(dev);
462 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
464 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
466 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
468 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
470 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
474 mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
475 struct resource_allocator *res_alloc,
478 struct mlx4_active_ports actv_ports;
479 int ports, counters_guaranteed;
481 /* For master, only allocate according to the number of phys ports */
482 if (vf == mlx4_master_func_num(dev))
483 return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
485 /* calculate real number of ports for the VF */
486 actv_ports = mlx4_get_active_ports(dev, vf);
487 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
488 counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
490 /* If we do not have enough counters for this VF, do not
491 * allocate any for it. The '-1' accounts for the sink counter.
493 if ((res_alloc->res_reserved + counters_guaranteed) >
494 (dev->caps.max_counters - 1))
497 return counters_guaranteed;
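/*
 * mlx4_init_resource_tracker() allocates the per-slave resource lists, the
 * per-type rb-tree roots and the quota/guaranteed/allocated arrays (sized
 * per port for MAC and VLAN, per device otherwise), then seeds each
 * function's quotas via initialize_res_quotas() plus the special-case
 * MAC/VLAN/counter rules handled in the loop below.
 */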
500 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
502 struct mlx4_priv *priv = mlx4_priv(dev);
506 priv->mfunc.master.res_tracker.slave_list =
507 kzalloc(dev->num_slaves * sizeof(struct slave_list),
509 if (!priv->mfunc.master.res_tracker.slave_list)
512 for (i = 0 ; i < dev->num_slaves; i++) {
513 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
514 INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
515 slave_list[i].res_list[t]);
516 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
519 mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
521 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
522 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
524 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
525 struct resource_allocator *res_alloc =
526 &priv->mfunc.master.res_tracker.res_alloc[i];
527 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
528 sizeof(int), GFP_KERNEL);
529 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
530 sizeof(int), GFP_KERNEL);
531 if (i == RES_MAC || i == RES_VLAN)
532 res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
533 (dev->persist->num_vfs
535 sizeof(int), GFP_KERNEL);
537 res_alloc->allocated = kzalloc((dev->persist->
539 sizeof(int), GFP_KERNEL);
540 /* Exclude the sink counter from the free pool */
541 if (i == RES_COUNTER)
542 res_alloc->res_free = dev->caps.max_counters - 1;
544 if (!res_alloc->quota || !res_alloc->guaranteed ||
545 !res_alloc->allocated)
548 spin_lock_init(&res_alloc->alloc_lock);
549 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
550 struct mlx4_active_ports actv_ports =
551 mlx4_get_active_ports(dev, t);
554 initialize_res_quotas(dev, res_alloc, RES_QP,
555 t, dev->caps.num_qps -
556 dev->caps.reserved_qps -
557 mlx4_num_reserved_sqps(dev));
560 initialize_res_quotas(dev, res_alloc, RES_CQ,
561 t, dev->caps.num_cqs -
562 dev->caps.reserved_cqs);
565 initialize_res_quotas(dev, res_alloc, RES_SRQ,
566 t, dev->caps.num_srqs -
567 dev->caps.reserved_srqs);
570 initialize_res_quotas(dev, res_alloc, RES_MPT,
571 t, dev->caps.num_mpts -
572 dev->caps.reserved_mrws);
575 initialize_res_quotas(dev, res_alloc, RES_MTT,
576 t, dev->caps.num_mtts -
577 dev->caps.reserved_mtts);
580 if (t == mlx4_master_func_num(dev)) {
581 int max_vfs_pport = 0;
582 /* Calculate the max vfs per port, across both ports */
584 for (j = 0; j < dev->caps.num_ports;
586 struct mlx4_slaves_pport slaves_pport =
587 mlx4_phys_to_slaves_pport(dev, j + 1);
588 unsigned current_slaves =
589 bitmap_weight(slaves_pport.slaves,
590 dev->caps.num_ports) - 1;
591 if (max_vfs_pport < current_slaves)
595 res_alloc->quota[t] =
598 res_alloc->guaranteed[t] = 2;
599 for (j = 0; j < MLX4_MAX_PORTS; j++)
600 res_alloc->res_port_free[j] =
603 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
604 res_alloc->guaranteed[t] = 2;
608 if (t == mlx4_master_func_num(dev)) {
609 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
610 res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
611 for (j = 0; j < MLX4_MAX_PORTS; j++)
612 res_alloc->res_port_free[j] =
615 res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
616 res_alloc->guaranteed[t] = 0;
620 res_alloc->quota[t] = dev->caps.max_counters;
621 res_alloc->guaranteed[t] =
622 mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
623 res_alloc->res_free -= res_alloc->guaranteed[t];
628 if (i == RES_MAC || i == RES_VLAN) {
629 for (j = 0; j < dev->caps.num_ports; j++)
630 if (test_bit(j, actv_ports.ports))
631 res_alloc->res_port_rsvd[j] +=
632 res_alloc->guaranteed[t];
634 res_alloc->res_reserved += res_alloc->guaranteed[t];
638 spin_lock_init(&priv->mfunc.master.res_tracker.lock);
642 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
643 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
644 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
645 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
646 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
647 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
648 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
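/*
 * mlx4_free_resource_tracker() tears things down in two optional stages
 * selected by @type: unless called with RES_TR_FREE_STRUCTS_ONLY it releases
 * every slave's outstanding resources (and the master's vlans and RoCE
 * gids), and unless called with RES_TR_FREE_SLAVES_ONLY it frees the
 * bookkeeping arrays; RES_TR_FREE_ALL does both.
 */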
653 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
654 enum mlx4_res_tracker_free_type type)
656 struct mlx4_priv *priv = mlx4_priv(dev);
659 if (priv->mfunc.master.res_tracker.slave_list) {
660 if (type != RES_TR_FREE_STRUCTS_ONLY) {
661 for (i = 0; i < dev->num_slaves; i++) {
662 if (type == RES_TR_FREE_ALL ||
663 dev->caps.function != i)
664 mlx4_delete_all_resources_for_slave(dev, i);
666 /* free master's vlans */
667 i = dev->caps.function;
668 mlx4_reset_roce_gids(dev, i);
669 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
670 rem_slave_vlans(dev, i);
671 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
674 if (type != RES_TR_FREE_SLAVES_ONLY) {
675 for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
676 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
677 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
678 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
679 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
680 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
681 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
683 kfree(priv->mfunc.master.res_tracker.slave_list);
684 priv->mfunc.master.res_tracker.slave_list = NULL;
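/*
 * The helpers below rewrite guest-visible fields inside command mailboxes
 * before they reach the firmware: update_pkey_index() maps the slave's
 * virtual pkey index to the physical one via virt2phys_pkey[], and
 * update_gid() rebases the mgid index of UD/RC/UC/XRC QP contexts onto the
 * slave's slice of the GID table.
 */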
689 static void update_pkey_index(struct mlx4_dev *dev, int slave,
690 struct mlx4_cmd_mailbox *inbox)
692 u8 sched = *(u8 *)(inbox->buf + 64);
693 u8 orig_index = *(u8 *)(inbox->buf + 35);
695 struct mlx4_priv *priv = mlx4_priv(dev);
698 port = (sched >> 6 & 1) + 1;
700 new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
701 *(u8 *)(inbox->buf + 35) = new_index;
704 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
707 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
708 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
709 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
712 if (MLX4_QP_ST_UD == ts) {
713 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
714 if (mlx4_is_eth(dev, port))
715 qp_ctx->pri_path.mgid_index =
716 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
718 qp_ctx->pri_path.mgid_index = slave | 0x80;
720 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
721 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
722 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
723 if (mlx4_is_eth(dev, port)) {
724 qp_ctx->pri_path.mgid_index +=
725 mlx4_get_base_gid_ix(dev, slave, port);
726 qp_ctx->pri_path.mgid_index &= 0x7f;
728 qp_ctx->pri_path.mgid_index = slave & 0x7F;
731 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
732 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
733 if (mlx4_is_eth(dev, port)) {
734 qp_ctx->alt_path.mgid_index +=
735 mlx4_get_base_gid_ix(dev, slave, port);
736 qp_ctx->alt_path.mgid_index &= 0x7f;
738 qp_ctx->alt_path.mgid_index = slave & 0x7F;
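/*
 * update_vport_qp_param() below applies the VF's operational state (VST
 * vlan / QinQ, spoof-checked MAC, link state, QoS vport) to a QP context on
 * its way to the firmware, and handle_counter() makes sure the context uses
 * a counter index actually owned by the slave.
 */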
744 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
747 static int update_vport_qp_param(struct mlx4_dev *dev,
748 struct mlx4_cmd_mailbox *inbox,
751 struct mlx4_qp_context *qpc = inbox->buf + 8;
752 struct mlx4_vport_oper_state *vp_oper;
753 struct mlx4_priv *priv;
757 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
758 priv = mlx4_priv(dev);
759 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
760 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
762 err = handle_counter(dev, qpc, slave, port);
766 if (MLX4_VGT != vp_oper->state.default_vlan) {
767 /* the reserved QPs (special, proxy, tunnel)
768 * do not operate over vlans
770 if (mlx4_is_qp_reserved(dev, qpn))
773 /* force vlan stripping by clearing VSD; the MLX QP type refers to Raw Ethernet */
774 if (qp_type == MLX4_QP_ST_UD ||
775 (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
776 if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
777 *(__be32 *)inbox->buf =
778 cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
779 MLX4_QP_OPTPAR_VLAN_STRIPPING);
780 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
782 struct mlx4_update_qp_params params = {.flags = 0};
784 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, ¶ms);
790 /* preserve IF_COUNTER flag */
791 qpc->pri_path.vlan_control &=
792 MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
793 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
794 dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
795 qpc->pri_path.vlan_control |=
796 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
797 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
798 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
799 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
800 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
801 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
802 } else if (0 != vp_oper->state.default_vlan) {
803 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
804 /* VST QinQ should block untagged traffic on TX,
805 * but the cvlan is in the payload and PHV is set, so
806 * hw sees it as untagged. Block tagged instead.
808 qpc->pri_path.vlan_control |=
809 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
810 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
811 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
812 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
813 } else { /* vst 802.1Q */
814 qpc->pri_path.vlan_control |=
815 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
816 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
817 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
819 } else { /* priority tagged */
820 qpc->pri_path.vlan_control |=
821 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
822 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
825 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
826 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
827 qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
828 if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
829 qpc->pri_path.fl |= MLX4_FL_SV;
831 qpc->pri_path.fl |= MLX4_FL_CV;
832 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
833 qpc->pri_path.sched_queue &= 0xC7;
834 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
835 qpc->qos_vport = vp_oper->state.qos_vport;
837 if (vp_oper->state.spoofchk) {
838 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
839 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
845 static int mpt_mask(struct mlx4_dev *dev)
847 return dev->caps.num_mpts - 1;
850 static void *find_res(struct mlx4_dev *dev, u64 res_id,
851 enum mlx4_resource type)
853 struct mlx4_priv *priv = mlx4_priv(dev);
855 return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
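/*
 * get_res()/put_res() implement a simple busy-marking protocol on top of the
 * tracker: get_res() checks ownership, saves the current state in from_state
 * and parks the entry in RES_ANY_BUSY so no concurrent command can move it;
 * put_res() restores the saved state.
 */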
859 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
860 enum mlx4_resource type,
863 struct res_common *r;
866 spin_lock_irq(mlx4_tlock(dev));
867 r = find_res(dev, res_id, type);
873 if (r->state == RES_ANY_BUSY) {
878 if (r->owner != slave) {
883 r->from_state = r->state;
884 r->state = RES_ANY_BUSY;
887 *((struct res_common **)res) = r;
890 spin_unlock_irq(mlx4_tlock(dev));
894 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
895 enum mlx4_resource type,
896 u64 res_id, int *slave)
899 struct res_common *r;
905 spin_lock(mlx4_tlock(dev));
907 r = find_res(dev, id, type);
912 spin_unlock(mlx4_tlock(dev));
917 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
918 enum mlx4_resource type)
920 struct res_common *r;
922 spin_lock_irq(mlx4_tlock(dev));
923 r = find_res(dev, res_id, type);
925 r->state = r->from_state;
926 spin_unlock_irq(mlx4_tlock(dev));
929 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
930 u64 in_param, u64 *out_param, int port);
932 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
935 struct res_common *r;
936 struct res_counter *counter;
939 if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
942 spin_lock_irq(mlx4_tlock(dev));
943 r = find_res(dev, counter_index, RES_COUNTER);
944 if (!r || r->owner != slave) {
947 counter = container_of(r, struct res_counter, com);
949 counter->port = port;
952 spin_unlock_irq(mlx4_tlock(dev));
956 static int handle_unexisting_counter(struct mlx4_dev *dev,
957 struct mlx4_qp_context *qpc, u8 slave,
960 struct mlx4_priv *priv = mlx4_priv(dev);
961 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
962 struct res_common *tmp;
963 struct res_counter *counter;
964 u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
967 spin_lock_irq(mlx4_tlock(dev));
968 list_for_each_entry(tmp,
969 &tracker->slave_list[slave].res_list[RES_COUNTER],
971 counter = container_of(tmp, struct res_counter, com);
972 if (port == counter->port) {
973 qpc->pri_path.counter_index = counter->com.res_id;
974 spin_unlock_irq(mlx4_tlock(dev));
978 spin_unlock_irq(mlx4_tlock(dev));
980 /* No existing counter, need to allocate a new counter */
981 err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
983 if (err == -ENOENT) {
985 } else if (err && err != -ENOSPC) {
986 mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
987 __func__, slave, err);
989 qpc->pri_path.counter_index = counter_idx;
990 mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
991 __func__, slave, qpc->pri_path.counter_index);
998 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
1001 if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
1002 return handle_existing_counter(dev, slave, port,
1003 qpc->pri_path.counter_index);
1005 return handle_unexisting_counter(dev, qpc, slave, port);
1008 static struct res_common *alloc_qp_tr(int id)
1012 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1016 ret->com.res_id = id;
1017 ret->com.state = RES_QP_RESERVED;
1018 ret->local_qpn = id;
1019 INIT_LIST_HEAD(&ret->mcg_list);
1020 spin_lock_init(&ret->mcg_spl);
1021 atomic_set(&ret->ref_count, 0);
1026 static struct res_common *alloc_mtt_tr(int id, int order)
1028 struct res_mtt *ret;
1030 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1034 ret->com.res_id = id;
1036 ret->com.state = RES_MTT_ALLOCATED;
1037 atomic_set(&ret->ref_count, 0);
1042 static struct res_common *alloc_mpt_tr(int id, int key)
1044 struct res_mpt *ret;
1046 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1050 ret->com.res_id = id;
1051 ret->com.state = RES_MPT_RESERVED;
1057 static struct res_common *alloc_eq_tr(int id)
1061 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1065 ret->com.res_id = id;
1066 ret->com.state = RES_EQ_RESERVED;
1071 static struct res_common *alloc_cq_tr(int id)
1075 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1079 ret->com.res_id = id;
1080 ret->com.state = RES_CQ_ALLOCATED;
1081 atomic_set(&ret->ref_count, 0);
1086 static struct res_common *alloc_srq_tr(int id)
1088 struct res_srq *ret;
1090 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1094 ret->com.res_id = id;
1095 ret->com.state = RES_SRQ_ALLOCATED;
1096 atomic_set(&ret->ref_count, 0);
1101 static struct res_common *alloc_counter_tr(int id, int port)
1103 struct res_counter *ret;
1105 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1109 ret->com.res_id = id;
1110 ret->com.state = RES_COUNTER_ALLOCATED;
1116 static struct res_common *alloc_xrcdn_tr(int id)
1118 struct res_xrcdn *ret;
1120 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1124 ret->com.res_id = id;
1125 ret->com.state = RES_XRCD_ALLOCATED;
1130 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1132 struct res_fs_rule *ret;
1134 ret = kzalloc(sizeof *ret, GFP_KERNEL);
1138 ret->com.res_id = id;
1139 ret->com.state = RES_FS_RULE_ALLOCATED;
1144 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1147 struct res_common *ret;
1151 ret = alloc_qp_tr(id);
1154 ret = alloc_mpt_tr(id, extra);
1157 ret = alloc_mtt_tr(id, extra);
1160 ret = alloc_eq_tr(id);
1163 ret = alloc_cq_tr(id);
1166 ret = alloc_srq_tr(id);
1169 pr_err("implementation missing\n");
1172 ret = alloc_counter_tr(id, extra);
1175 ret = alloc_xrcdn_tr(id);
1178 ret = alloc_fs_rule_tr(id, extra);
1189 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1190 struct mlx4_counter *data)
1192 struct mlx4_priv *priv = mlx4_priv(dev);
1193 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1194 struct res_common *tmp;
1195 struct res_counter *counter;
1199 memset(data, 0, sizeof(*data));
1201 counters_arr = kmalloc_array(dev->caps.max_counters,
1202 sizeof(*counters_arr), GFP_KERNEL);
1206 spin_lock_irq(mlx4_tlock(dev));
1207 list_for_each_entry(tmp,
1208 &tracker->slave_list[slave].res_list[RES_COUNTER],
1210 counter = container_of(tmp, struct res_counter, com);
1211 if (counter->port == port) {
1212 counters_arr[i] = (int)tmp->res_id;
1216 spin_unlock_irq(mlx4_tlock(dev));
1217 counters_arr[i] = -1;
1221 while (counters_arr[i] != -1) {
1222 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1225 memset(data, 0, sizeof(*data));
1232 kfree(counters_arr);
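/*
 * add_res_range()/rem_res_range() register or drop a contiguous range of
 * resource ids for a slave: each id gets a tracker entry from alloc_tr(),
 * is inserted into the per-type rb-tree and appended to the slave's list
 * under the tracker lock, with full rollback if any id in the range already
 * exists or fails to insert.
 */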
1236 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1237 enum mlx4_resource type, int extra)
1241 struct mlx4_priv *priv = mlx4_priv(dev);
1242 struct res_common **res_arr;
1243 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1244 struct rb_root *root = &tracker->res_tree[type];
1246 res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1250 for (i = 0; i < count; ++i) {
1251 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1253 for (--i; i >= 0; --i)
1261 spin_lock_irq(mlx4_tlock(dev));
1262 for (i = 0; i < count; ++i) {
1263 if (find_res(dev, base + i, type)) {
1267 err = res_tracker_insert(root, res_arr[i]);
1270 list_add_tail(&res_arr[i]->list,
1271 &tracker->slave_list[slave].res_list[type]);
1273 spin_unlock_irq(mlx4_tlock(dev));
1279 for (--i; i >= 0; --i) {
1280 rb_erase(&res_arr[i]->node, root);
1281 list_del_init(&res_arr[i]->list);
1284 spin_unlock_irq(mlx4_tlock(dev));
1286 for (i = 0; i < count; ++i)
1294 static int remove_qp_ok(struct res_qp *res)
1296 if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1297 !list_empty(&res->mcg_list)) {
1298 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1299 res->com.state, atomic_read(&res->ref_count));
1301 } else if (res->com.state != RES_QP_RESERVED) {
1308 static int remove_mtt_ok(struct res_mtt *res, int order)
1310 if (res->com.state == RES_MTT_BUSY ||
1311 atomic_read(&res->ref_count)) {
1312 pr_devel("%s-%d: state %s, ref_count %d\n",
1314 mtt_states_str(res->com.state),
1315 atomic_read(&res->ref_count));
1317 } else if (res->com.state != RES_MTT_ALLOCATED)
1319 else if (res->order != order)
1325 static int remove_mpt_ok(struct res_mpt *res)
1327 if (res->com.state == RES_MPT_BUSY)
1329 else if (res->com.state != RES_MPT_RESERVED)
1335 static int remove_eq_ok(struct res_eq *res)
1337 if (res->com.state == RES_MPT_BUSY)
1339 else if (res->com.state != RES_MPT_RESERVED)
1345 static int remove_counter_ok(struct res_counter *res)
1347 if (res->com.state == RES_COUNTER_BUSY)
1349 else if (res->com.state != RES_COUNTER_ALLOCATED)
1355 static int remove_xrcdn_ok(struct res_xrcdn *res)
1357 if (res->com.state == RES_XRCD_BUSY)
1359 else if (res->com.state != RES_XRCD_ALLOCATED)
1365 static int remove_fs_rule_ok(struct res_fs_rule *res)
1367 if (res->com.state == RES_FS_RULE_BUSY)
1369 else if (res->com.state != RES_FS_RULE_ALLOCATED)
1375 static int remove_cq_ok(struct res_cq *res)
1377 if (res->com.state == RES_CQ_BUSY)
1379 else if (res->com.state != RES_CQ_ALLOCATED)
1385 static int remove_srq_ok(struct res_srq *res)
1387 if (res->com.state == RES_SRQ_BUSY)
1389 else if (res->com.state != RES_SRQ_ALLOCATED)
1395 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1399 return remove_qp_ok((struct res_qp *)res);
1401 return remove_cq_ok((struct res_cq *)res);
1403 return remove_srq_ok((struct res_srq *)res);
1405 return remove_mpt_ok((struct res_mpt *)res);
1407 return remove_mtt_ok((struct res_mtt *)res, extra);
1411 return remove_eq_ok((struct res_eq *)res);
1413 return remove_counter_ok((struct res_counter *)res);
1415 return remove_xrcdn_ok((struct res_xrcdn *)res);
1417 return remove_fs_rule_ok((struct res_fs_rule *)res);
1423 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1424 enum mlx4_resource type, int extra)
1428 struct mlx4_priv *priv = mlx4_priv(dev);
1429 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1430 struct res_common *r;
1432 spin_lock_irq(mlx4_tlock(dev));
1433 for (i = base; i < base + count; ++i) {
1434 r = res_tracker_lookup(&tracker->res_tree[type], i);
1439 if (r->owner != slave) {
1443 err = remove_ok(r, type, extra);
1448 for (i = base; i < base + count; ++i) {
1449 r = res_tracker_lookup(&tracker->res_tree[type], i);
1450 rb_erase(&r->node, &tracker->res_tree[type]);
1457 spin_unlock_irq(mlx4_tlock(dev));
1462 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1463 enum res_qp_states state, struct res_qp **qp,
1466 struct mlx4_priv *priv = mlx4_priv(dev);
1467 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1471 spin_lock_irq(mlx4_tlock(dev));
1472 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1475 else if (r->com.owner != slave)
1480 mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1481 __func__, r->com.res_id);
1485 case RES_QP_RESERVED:
1486 if (r->com.state == RES_QP_MAPPED && !alloc)
1489 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1494 if ((r->com.state == RES_QP_RESERVED && alloc) ||
1495 r->com.state == RES_QP_HW)
1498 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1506 if (r->com.state != RES_QP_MAPPED)
1514 r->com.from_state = r->com.state;
1515 r->com.to_state = state;
1516 r->com.state = RES_QP_BUSY;
1522 spin_unlock_irq(mlx4_tlock(dev));
1527 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1528 enum res_mpt_states state, struct res_mpt **mpt)
1530 struct mlx4_priv *priv = mlx4_priv(dev);
1531 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1535 spin_lock_irq(mlx4_tlock(dev));
1536 r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1539 else if (r->com.owner != slave)
1547 case RES_MPT_RESERVED:
1548 if (r->com.state != RES_MPT_MAPPED)
1552 case RES_MPT_MAPPED:
1553 if (r->com.state != RES_MPT_RESERVED &&
1554 r->com.state != RES_MPT_HW)
1559 if (r->com.state != RES_MPT_MAPPED)
1567 r->com.from_state = r->com.state;
1568 r->com.to_state = state;
1569 r->com.state = RES_MPT_BUSY;
1575 spin_unlock_irq(mlx4_tlock(dev));
1580 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1581 enum res_eq_states state, struct res_eq **eq)
1583 struct mlx4_priv *priv = mlx4_priv(dev);
1584 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1588 spin_lock_irq(mlx4_tlock(dev));
1589 r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1592 else if (r->com.owner != slave)
1600 case RES_EQ_RESERVED:
1601 if (r->com.state != RES_EQ_HW)
1606 if (r->com.state != RES_EQ_RESERVED)
1615 r->com.from_state = r->com.state;
1616 r->com.to_state = state;
1617 r->com.state = RES_EQ_BUSY;
1621 spin_unlock_irq(mlx4_tlock(dev));
1629 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1630 enum res_cq_states state, struct res_cq **cq)
1632 struct mlx4_priv *priv = mlx4_priv(dev);
1633 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1637 spin_lock_irq(mlx4_tlock(dev));
1638 r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1641 } else if (r->com.owner != slave) {
1643 } else if (state == RES_CQ_ALLOCATED) {
1644 if (r->com.state != RES_CQ_HW)
1646 else if (atomic_read(&r->ref_count))
1650 } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1657 r->com.from_state = r->com.state;
1658 r->com.to_state = state;
1659 r->com.state = RES_CQ_BUSY;
1664 spin_unlock_irq(mlx4_tlock(dev));
1669 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1670 enum res_srq_states state, struct res_srq **srq)
1672 struct mlx4_priv *priv = mlx4_priv(dev);
1673 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1677 spin_lock_irq(mlx4_tlock(dev));
1678 r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1681 } else if (r->com.owner != slave) {
1683 } else if (state == RES_SRQ_ALLOCATED) {
1684 if (r->com.state != RES_SRQ_HW)
1686 else if (atomic_read(&r->ref_count))
1688 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1693 r->com.from_state = r->com.state;
1694 r->com.to_state = state;
1695 r->com.state = RES_SRQ_BUSY;
1700 spin_unlock_irq(mlx4_tlock(dev));
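/*
 * The *_res_start_move_to() helpers above validate a state transition,
 * record it in from_state/to_state and mark the entry busy; once the
 * firmware command completes, the caller either commits the transition with
 * res_end_move() or rolls it back with res_abort_move() below.
 */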
1705 static void res_abort_move(struct mlx4_dev *dev, int slave,
1706 enum mlx4_resource type, int id)
1708 struct mlx4_priv *priv = mlx4_priv(dev);
1709 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1710 struct res_common *r;
1712 spin_lock_irq(mlx4_tlock(dev));
1713 r = res_tracker_lookup(&tracker->res_tree[type], id);
1714 if (r && (r->owner == slave))
1715 r->state = r->from_state;
1716 spin_unlock_irq(mlx4_tlock(dev));
1719 static void res_end_move(struct mlx4_dev *dev, int slave,
1720 enum mlx4_resource type, int id)
1722 struct mlx4_priv *priv = mlx4_priv(dev);
1723 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1724 struct res_common *r;
1726 spin_lock_irq(mlx4_tlock(dev));
1727 r = res_tracker_lookup(&tracker->res_tree[type], id);
1728 if (r && (r->owner == slave))
1729 r->state = r->to_state;
1730 spin_unlock_irq(mlx4_tlock(dev));
1733 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1735 return mlx4_is_qp_reserved(dev, qpn) &&
1736 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1739 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1741 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
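/*
 * The *_alloc_res() handlers below implement the ALLOC_RES command: charge
 * the request against the slave's quota with mlx4_grant_resource(), perform
 * the real allocation with the same __mlx4_* helpers the PF uses for
 * itself, and record the result with add_res_range(); every error path
 * unwinds in reverse order.
 */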
1744 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1745 u64 in_param, u64 *out_param)
1755 case RES_OP_RESERVE:
1756 count = get_param_l(&in_param) & 0xffffff;
1757 /* Turn off all unsupported QP allocation flags that the
1758 * slave tries to set.
1760 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1761 align = get_param_h(&in_param);
1762 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1766 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1768 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1772 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1774 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1775 __mlx4_qp_release_range(dev, base, count);
1778 set_param_l(out_param, base);
1780 case RES_OP_MAP_ICM:
1781 qpn = get_param_l(&in_param) & 0x7fffff;
1782 if (valid_reserved(dev, slave, qpn)) {
1783 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1788 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1793 if (!fw_reserved(dev, qpn)) {
1794 err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1796 res_abort_move(dev, slave, RES_QP, qpn);
1801 res_end_move(dev, slave, RES_QP, qpn);
1811 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1812 u64 in_param, u64 *out_param)
1818 if (op != RES_OP_RESERVE_AND_MAP)
1821 order = get_param_l(&in_param);
1823 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1827 base = __mlx4_alloc_mtt_range(dev, order);
1829 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1833 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1835 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1836 __mlx4_free_mtt_range(dev, base, order);
1838 set_param_l(out_param, base);
1844 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1845 u64 in_param, u64 *out_param)
1850 struct res_mpt *mpt;
1853 case RES_OP_RESERVE:
1854 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1858 index = __mlx4_mpt_reserve(dev);
1860 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1863 id = index & mpt_mask(dev);
1865 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1867 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1868 __mlx4_mpt_release(dev, index);
1871 set_param_l(out_param, index);
1873 case RES_OP_MAP_ICM:
1874 index = get_param_l(&in_param);
1875 id = index & mpt_mask(dev);
1876 err = mr_res_start_move_to(dev, slave, id,
1877 RES_MPT_MAPPED, &mpt);
1881 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1883 res_abort_move(dev, slave, RES_MPT, id);
1887 res_end_move(dev, slave, RES_MPT, id);
1893 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1894 u64 in_param, u64 *out_param)
1900 case RES_OP_RESERVE_AND_MAP:
1901 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1905 err = __mlx4_cq_alloc_icm(dev, &cqn);
1907 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1911 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1913 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1914 __mlx4_cq_free_icm(dev, cqn);
1918 set_param_l(out_param, cqn);
1928 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1929 u64 in_param, u64 *out_param)
1935 case RES_OP_RESERVE_AND_MAP:
1936 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1940 err = __mlx4_srq_alloc_icm(dev, &srqn);
1942 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1946 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1948 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1949 __mlx4_srq_free_icm(dev, srqn);
1953 set_param_l(out_param, srqn);
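/*
 * MAC and VLAN registrations are reference-counted per slave and per port in
 * the lists below: mac_add_to_slave()/vlan_add_to_slave() bump an existing
 * entry or charge a new one against the quota, the *_del_from_slave()
 * helpers drop references again, and rem_slave_macs()/rem_slave_vlans()
 * release whatever a slave still holds when it is cleaned up.
 */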
1963 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1964 u8 smac_index, u64 *mac)
1966 struct mlx4_priv *priv = mlx4_priv(dev);
1967 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1968 struct list_head *mac_list =
1969 &tracker->slave_list[slave].res_list[RES_MAC];
1970 struct mac_res *res, *tmp;
1972 list_for_each_entry_safe(res, tmp, mac_list, list) {
1973 if (res->smac_index == smac_index && res->port == (u8) port) {
1981 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1983 struct mlx4_priv *priv = mlx4_priv(dev);
1984 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1985 struct list_head *mac_list =
1986 &tracker->slave_list[slave].res_list[RES_MAC];
1987 struct mac_res *res, *tmp;
1989 list_for_each_entry_safe(res, tmp, mac_list, list) {
1990 if (res->mac == mac && res->port == (u8) port) {
1991 /* mac found. update ref count */
1997 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1999 res = kzalloc(sizeof *res, GFP_KERNEL);
2001 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2005 res->port = (u8) port;
2006 res->smac_index = smac_index;
2008 list_add_tail(&res->list,
2009 &tracker->slave_list[slave].res_list[RES_MAC]);
2013 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2016 struct mlx4_priv *priv = mlx4_priv(dev);
2017 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2018 struct list_head *mac_list =
2019 &tracker->slave_list[slave].res_list[RES_MAC];
2020 struct mac_res *res, *tmp;
2022 list_for_each_entry_safe(res, tmp, mac_list, list) {
2023 if (res->mac == mac && res->port == (u8) port) {
2024 if (!--res->ref_count) {
2025 list_del(&res->list);
2026 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2034 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2036 struct mlx4_priv *priv = mlx4_priv(dev);
2037 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2038 struct list_head *mac_list =
2039 &tracker->slave_list[slave].res_list[RES_MAC];
2040 struct mac_res *res, *tmp;
2043 list_for_each_entry_safe(res, tmp, mac_list, list) {
2044 list_del(&res->list);
2045 /* dereference the mac as many times as the slave referenced it */
2046 for (i = 0; i < res->ref_count; i++)
2047 __mlx4_unregister_mac(dev, res->port, res->mac);
2048 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2053 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2054 u64 in_param, u64 *out_param, int in_port)
2061 if (op != RES_OP_RESERVE_AND_MAP)
2064 port = !in_port ? get_param_l(out_param) : in_port;
2065 port = mlx4_slave_convert_port(
2072 err = __mlx4_register_mac(dev, port, mac);
2075 set_param_l(out_param, err);
2080 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2082 __mlx4_unregister_mac(dev, port, mac);
2087 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2088 int port, int vlan_index)
2090 struct mlx4_priv *priv = mlx4_priv(dev);
2091 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2092 struct list_head *vlan_list =
2093 &tracker->slave_list[slave].res_list[RES_VLAN];
2094 struct vlan_res *res, *tmp;
2096 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2097 if (res->vlan == vlan && res->port == (u8) port) {
2098 /* vlan found. update ref count */
2104 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2106 res = kzalloc(sizeof(*res), GFP_KERNEL);
2108 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2112 res->port = (u8) port;
2113 res->vlan_index = vlan_index;
2115 list_add_tail(&res->list,
2116 &tracker->slave_list[slave].res_list[RES_VLAN]);
2121 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2124 struct mlx4_priv *priv = mlx4_priv(dev);
2125 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2126 struct list_head *vlan_list =
2127 &tracker->slave_list[slave].res_list[RES_VLAN];
2128 struct vlan_res *res, *tmp;
2130 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2131 if (res->vlan == vlan && res->port == (u8) port) {
2132 if (!--res->ref_count) {
2133 list_del(&res->list);
2134 mlx4_release_resource(dev, slave, RES_VLAN,
2143 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2145 struct mlx4_priv *priv = mlx4_priv(dev);
2146 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2147 struct list_head *vlan_list =
2148 &tracker->slave_list[slave].res_list[RES_VLAN];
2149 struct vlan_res *res, *tmp;
2152 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2153 list_del(&res->list);
2154 /* dereference the vlan as many times as the slave referenced it */
2155 for (i = 0; i < res->ref_count; i++)
2156 __mlx4_unregister_vlan(dev, res->port, res->vlan);
2157 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2162 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2163 u64 in_param, u64 *out_param, int in_port)
2165 struct mlx4_priv *priv = mlx4_priv(dev);
2166 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2172 port = !in_port ? get_param_l(out_param) : in_port;
2174 if (!port || op != RES_OP_RESERVE_AND_MAP)
2177 port = mlx4_slave_convert_port(
2182 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2183 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2184 slave_state[slave].old_vlan_api = true;
2188 vlan = (u16) in_param;
2190 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2192 set_param_l(out_param, (u32) vlan_index);
2193 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2195 __mlx4_unregister_vlan(dev, port, vlan);
2200 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2201 u64 in_param, u64 *out_param, int port)
2206 if (op != RES_OP_RESERVE)
2209 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2213 err = __mlx4_counter_alloc(dev, &index);
2215 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2219 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2221 __mlx4_counter_free(dev, index);
2222 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2224 set_param_l(out_param, index);
2230 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2231 u64 in_param, u64 *out_param)
2236 if (op != RES_OP_RESERVE)
2239 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2243 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2245 __mlx4_xrcd_free(dev, xrcdn);
2247 set_param_l(out_param, xrcdn);
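/*
 * mlx4_ALLOC_RES_wrapper() dispatches on the resource type in the low byte
 * of vhcr->in_modifier (with the port, where relevant, in the next byte) and
 * on the RES_OP_* operation carried in vhcr->op_modifier;
 * mlx4_FREE_RES_wrapper() further down mirrors the same decoding for frees.
 */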
2252 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2253 struct mlx4_vhcr *vhcr,
2254 struct mlx4_cmd_mailbox *inbox,
2255 struct mlx4_cmd_mailbox *outbox,
2256 struct mlx4_cmd_info *cmd)
2259 int alop = vhcr->op_modifier;
2261 switch (vhcr->in_modifier & 0xFF) {
2263 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2264 vhcr->in_param, &vhcr->out_param);
2268 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2269 vhcr->in_param, &vhcr->out_param);
2273 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2274 vhcr->in_param, &vhcr->out_param);
2278 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2279 vhcr->in_param, &vhcr->out_param);
2283 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2284 vhcr->in_param, &vhcr->out_param);
2288 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2289 vhcr->in_param, &vhcr->out_param,
2290 (vhcr->in_modifier >> 8) & 0xFF);
2294 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2295 vhcr->in_param, &vhcr->out_param,
2296 (vhcr->in_modifier >> 8) & 0xFF);
2300 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2301 vhcr->in_param, &vhcr->out_param, 0);
2305 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2306 vhcr->in_param, &vhcr->out_param);
2317 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2326 case RES_OP_RESERVE:
2327 base = get_param_l(&in_param) & 0x7fffff;
2328 count = get_param_h(&in_param);
2329 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2332 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2333 __mlx4_qp_release_range(dev, base, count);
2335 case RES_OP_MAP_ICM:
2336 qpn = get_param_l(&in_param) & 0x7fffff;
2337 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2342 if (!fw_reserved(dev, qpn))
2343 __mlx4_qp_free_icm(dev, qpn);
2345 res_end_move(dev, slave, RES_QP, qpn);
2347 if (valid_reserved(dev, slave, qpn))
2348 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2357 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2358 u64 in_param, u64 *out_param)
2364 if (op != RES_OP_RESERVE_AND_MAP)
2367 base = get_param_l(&in_param);
2368 order = get_param_h(&in_param);
2369 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2371 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2372 __mlx4_free_mtt_range(dev, base, order);
2377 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2383 struct res_mpt *mpt;
2386 case RES_OP_RESERVE:
2387 index = get_param_l(&in_param);
2388 id = index & mpt_mask(dev);
2389 err = get_res(dev, slave, id, RES_MPT, &mpt);
2393 put_res(dev, slave, id, RES_MPT);
2395 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2398 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2399 __mlx4_mpt_release(dev, index);
2401 case RES_OP_MAP_ICM:
2402 index = get_param_l(&in_param);
2403 id = index & mpt_mask(dev);
2404 err = mr_res_start_move_to(dev, slave, id,
2405 RES_MPT_RESERVED, &mpt);
2409 __mlx4_mpt_free_icm(dev, mpt->key);
2410 res_end_move(dev, slave, RES_MPT, id);
2419 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2420 u64 in_param, u64 *out_param)
2426 case RES_OP_RESERVE_AND_MAP:
2427 cqn = get_param_l(&in_param);
2428 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2432 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2433 __mlx4_cq_free_icm(dev, cqn);
2444 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2445 u64 in_param, u64 *out_param)
2451 case RES_OP_RESERVE_AND_MAP:
2452 srqn = get_param_l(&in_param);
2453 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2457 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2458 __mlx4_srq_free_icm(dev, srqn);
2469 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2470 u64 in_param, u64 *out_param, int in_port)
2476 case RES_OP_RESERVE_AND_MAP:
2477 port = !in_port ? get_param_l(out_param) : in_port;
2478 port = mlx4_slave_convert_port(
2483 mac_del_from_slave(dev, slave, in_param, port);
2484 __mlx4_unregister_mac(dev, port, in_param);
2495 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2496 u64 in_param, u64 *out_param, int port)
2498 struct mlx4_priv *priv = mlx4_priv(dev);
2499 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2502 port = mlx4_slave_convert_port(
2508 case RES_OP_RESERVE_AND_MAP:
2509 if (slave_state[slave].old_vlan_api)
2513 vlan_del_from_slave(dev, slave, in_param, port);
2514 __mlx4_unregister_vlan(dev, port, in_param);
2524 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2525 u64 in_param, u64 *out_param)
2530 if (op != RES_OP_RESERVE)
2533 index = get_param_l(&in_param);
2534 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2537 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2541 __mlx4_counter_free(dev, index);
2542 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2547 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2548 u64 in_param, u64 *out_param)
2553 if (op != RES_OP_RESERVE)
2556 xrcdn = get_param_l(&in_param);
2557 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2561 __mlx4_xrcd_free(dev, xrcdn);
2566 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2567 struct mlx4_vhcr *vhcr,
2568 struct mlx4_cmd_mailbox *inbox,
2569 struct mlx4_cmd_mailbox *outbox,
2570 struct mlx4_cmd_info *cmd)
2573 int alop = vhcr->op_modifier;
2575 switch (vhcr->in_modifier & 0xFF) {
2577 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2582 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2583 vhcr->in_param, &vhcr->out_param);
2587 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2592 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2593 vhcr->in_param, &vhcr->out_param);
2597 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2598 vhcr->in_param, &vhcr->out_param);
2602 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2603 vhcr->in_param, &vhcr->out_param,
2604 (vhcr->in_modifier >> 8) & 0xFF);
2608 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2609 vhcr->in_param, &vhcr->out_param,
2610 (vhcr->in_modifier >> 8) & 0xFF);
2614 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2615 vhcr->in_param, &vhcr->out_param);
2619 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2620 vhcr->in_param, &vhcr->out_param);
2628 /* ugly but other choices are uglier */
2629 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2631 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2634 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2636 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2639 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2641 return be32_to_cpu(mpt->mtt_sz);
2644 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2646 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2649 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2651 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2654 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2656 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2659 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2661 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2664 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2666 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2669 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2671 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
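/*
 * qp_get_mtt_size() below recomputes how many MTT pages a QP context
 * implies from its WQ sizes.  Illustrative example (made-up values):
 * log_sq_size = 6, log_sq_stride = 2, log_rq_size = 4, log_rq_stride = 2,
 * no SRQ/RSS/XRC, page_shift = 12, page_offset = 0:
 *	sq_size     = 1 << (6 + 2 + 4) = 4096 bytes
 *	rq_size     = 1 << (4 + 2 + 4) = 1024 bytes
 *	total_pages = roundup_pow_of_two((4096 + 1024) >> 12) = 1
 */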
2674 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2676 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2677 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2678 int log_sq_sride = qpc->sq_size_stride & 7;
2679 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2680 int log_rq_stride = qpc->rq_size_stride & 7;
2681 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2682 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2683 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2684 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2689 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2692 sq_size = 1 << (log_sq_size + log_sq_sride + 4);
2693 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2694 total_mem = sq_size + rq_size;
2695 tot = (total_mem + (page_offset << 6)) >> page_shift;
2696 total_pages = !tot ? 1 : roundup_pow_of_two(tot);
2701 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2702 int size, struct res_mtt *mtt)
2704 int res_start = mtt->com.res_id;
2705 int res_size = (1 << mtt->order);
2707 if (start < res_start || start + size > res_start + res_size)
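/*
 * mlx4_SW2HW_MPT_wrapper() validates an MPT handed down by a VF before
 * passing it to firmware: memory windows, FMRs with bind-enable set, and
 * MPTs whose PD encodes another function are rejected, and for non-physical
 * regions the referenced MTT range must be owned by the slave and large
 * enough (check_mtt_range()).
 */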
2712 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2713 struct mlx4_vhcr *vhcr,
2714 struct mlx4_cmd_mailbox *inbox,
2715 struct mlx4_cmd_mailbox *outbox,
2716 struct mlx4_cmd_info *cmd)
2719 int index = vhcr->in_modifier;
2720 struct res_mtt *mtt;
2721 struct res_mpt *mpt;
2722 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2728 id = index & mpt_mask(dev);
2729 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2733 /* Disable memory windows for VFs. */
2734 if (!mr_is_region(inbox->buf)) {
2739 /* Make sure that the PD bits related to the slave id are zeros. */
2740 pd = mr_get_pd(inbox->buf);
2741 pd_slave = (pd >> 17) & 0x7f;
2742 if (pd_slave != 0 && --pd_slave != slave) {
2747 if (mr_is_fmr(inbox->buf)) {
2748 /* FMR and Bind Enable are forbidden in slave devices. */
2749 if (mr_is_bind_enabled(inbox->buf)) {
2753 /* FMR and Memory Windows are also forbidden. */
2754 if (!mr_is_region(inbox->buf)) {
2760 phys = mr_phys_mpt(inbox->buf);
2762 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2766 err = check_mtt_range(dev, slave, mtt_base,
2767 mr_get_mtt_size(inbox->buf), mtt);
2774 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2779 atomic_inc(&mtt->ref_count);
2780 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2783 res_end_move(dev, slave, RES_MPT, id);
2788 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2790 res_abort_move(dev, slave, RES_MPT, id);
2795 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2796 struct mlx4_vhcr *vhcr,
2797 struct mlx4_cmd_mailbox *inbox,
2798 struct mlx4_cmd_mailbox *outbox,
2799 struct mlx4_cmd_info *cmd)
2802 int index = vhcr->in_modifier;
2803 struct res_mpt *mpt;
2806 id = index & mpt_mask(dev);
2807 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2811 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2816 atomic_dec(&mpt->mtt->ref_count);
2818 res_end_move(dev, slave, RES_MPT, id);
2822 res_abort_move(dev, slave, RES_MPT, id);
2827 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2828 struct mlx4_vhcr *vhcr,
2829 struct mlx4_cmd_mailbox *inbox,
2830 struct mlx4_cmd_mailbox *outbox,
2831 struct mlx4_cmd_info *cmd)
2834 int index = vhcr->in_modifier;
2835 struct res_mpt *mpt;
2838 id = index & mpt_mask(dev);
2839 err = get_res(dev, slave, id, RES_MPT, &mpt);
2843 if (mpt->com.from_state == RES_MPT_MAPPED) {
2844 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2845 * that, the VF must read the MPT. But since the MPT entry memory is not
2846 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2847 * entry contents. To guarantee that the MPT cannot be changed, the driver
2848 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2849 * ownership following the change. The change here allows the VF to
2850 * perform QUERY_MPT also when the entry is in SW ownership.
2852 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2853 &mlx4_priv(dev)->mr_table.dmpt_table,
2856 if (NULL == mpt_entry || NULL == outbox->buf) {
2861 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2864 } else if (mpt->com.from_state == RES_MPT_HW) {
2865 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2873 put_res(dev, slave, id, RES_MPT);
2877 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2879 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2882 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2884 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2887 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2889 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2892 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2893 struct mlx4_qp_context *context)
2895 u32 qpn = vhcr->in_modifier & 0xffffff;
2898 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2901 /* adjust qkey in qp context */
2902 context->qkey = cpu_to_be32(qkey);
2905 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2906 struct mlx4_qp_context *qpc,
2907 struct mlx4_cmd_mailbox *inbox);
2909 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2910 struct mlx4_vhcr *vhcr,
2911 struct mlx4_cmd_mailbox *inbox,
2912 struct mlx4_cmd_mailbox *outbox,
2913 struct mlx4_cmd_info *cmd)
2916 int qpn = vhcr->in_modifier & 0x7fffff;
2917 struct res_mtt *mtt;
2919 struct mlx4_qp_context *qpc = inbox->buf + 8;
2920 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2921 int mtt_size = qp_get_mtt_size(qpc);
2924 int rcqn = qp_get_rcqn(qpc);
2925 int scqn = qp_get_scqn(qpc);
2926 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2927 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2928 struct res_srq *srq;
2929 int local_qpn = vhcr->in_modifier & 0xffffff;
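/* Moving the QP to HW ownership pins everything the context references:
 * the MTT range, the receive and send CQs and, when used, the SRQ all get
 * their reference counts bumped below so they cannot be freed under the QP. */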
2931 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2935 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2938 qp->local_qpn = local_qpn;
2939 qp->sched_queue = 0;
2941 qp->vlan_control = 0;
2943 qp->pri_path_fl = 0;
2946 qp->qpc_flags = be32_to_cpu(qpc->flags);
2948 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2952 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2956 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2961 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2968 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2973 adjust_proxy_tun_qkey(dev, vhcr, qpc);
2974 update_pkey_index(dev, slave, inbox);
2975 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2978 atomic_inc(&mtt->ref_count);
2980 atomic_inc(&rcq->ref_count);
2982 atomic_inc(&scq->ref_count);
2986 put_res(dev, slave, scqn, RES_CQ);
2989 atomic_inc(&srq->ref_count);
2990 put_res(dev, slave, srqn, RES_SRQ);
2994 /* Save param3 for dynamic changes from VST back to VGT */
2995 qp->param3 = qpc->param3;
2996 put_res(dev, slave, rcqn, RES_CQ);
2997 put_res(dev, slave, mtt_base, RES_MTT);
2998 res_end_move(dev, slave, RES_QP, qpn);
3004 put_res(dev, slave, srqn, RES_SRQ);
3007 put_res(dev, slave, scqn, RES_CQ);
3009 put_res(dev, slave, rcqn, RES_CQ);
3011 put_res(dev, slave, mtt_base, RES_MTT);
3013 res_abort_move(dev, slave, RES_QP, qpn);
3018 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3020 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3023 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3025 int log_eq_size = eqc->log_eq_size & 0x1f;
3026 int page_shift = (eqc->log_page_size & 0x3f) + 12;
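/* The EQ buffer holds 2^log_eq_size entries of 32 bytes (2^5) each; return
 * the number of pages of 2^page_shift bytes needed to hold it, at least one. */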
3028 if (log_eq_size + 5 < page_shift)
3031 return 1 << (log_eq_size + 5 - page_shift);
3034 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3036 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3039 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3041 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3042 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3044 if (log_cq_size + 5 < page_shift)
3047 return 1 << (log_cq_size + 5 - page_shift);
3050 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3051 struct mlx4_vhcr *vhcr,
3052 struct mlx4_cmd_mailbox *inbox,
3053 struct mlx4_cmd_mailbox *outbox,
3054 struct mlx4_cmd_info *cmd)
3057 int eqn = vhcr->in_modifier;
3058 int res_id = (slave << 10) | eqn;
3059 struct mlx4_eq_context *eqc = inbox->buf;
3060 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3061 int mtt_size = eq_get_mtt_size(eqc);
3063 struct res_mtt *mtt;
3065 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3068 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3072 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3076 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3080 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3084 atomic_inc(&mtt->ref_count);
3086 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3087 res_end_move(dev, slave, RES_EQ, res_id);
3091 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3093 res_abort_move(dev, slave, RES_EQ, res_id);
3095 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3099 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3100 struct mlx4_vhcr *vhcr,
3101 struct mlx4_cmd_mailbox *inbox,
3102 struct mlx4_cmd_mailbox *outbox,
3103 struct mlx4_cmd_info *cmd)
3106 u8 get = vhcr->op_modifier;
3111 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3116 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3117 int len, struct res_mtt **res)
3119 struct mlx4_priv *priv = mlx4_priv(dev);
3120 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3121 struct res_mtt *mtt;
3124 spin_lock_irq(mlx4_tlock(dev));
3125 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3127 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3129 mtt->com.from_state = mtt->com.state;
3130 mtt->com.state = RES_MTT_BUSY;
3135 spin_unlock_irq(mlx4_tlock(dev));
3140 static int verify_qp_parameters(struct mlx4_dev *dev,
3141 struct mlx4_vhcr *vhcr,
3142 struct mlx4_cmd_mailbox *inbox,
3143 enum qp_transition transition, u8 slave)
3147 struct mlx4_qp_context *qp_ctx;
3148 enum mlx4_qp_optpar optpar;
3152 qp_ctx = inbox->buf + 8;
3153 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3154 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
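/* The modify-QP mailbox begins with the optional-parameter mask (one dword);
 * the QP context itself starts at offset 8. */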
3156 if (slave != mlx4_master_func_num(dev)) {
3157 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3158 /* setting QP rate-limit is disallowed for VFs */
3159 if (qp_ctx->rate_limit_params)
3165 case MLX4_QP_ST_XRC:
3167 switch (transition) {
3168 case QP_TRANS_INIT2RTR:
3169 case QP_TRANS_RTR2RTS:
3170 case QP_TRANS_RTS2RTS:
3171 case QP_TRANS_SQD2SQD:
3172 case QP_TRANS_SQD2RTS:
3173 if (slave != mlx4_master_func_num(dev)) {
3174 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3175 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3176 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3177 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3180 if (qp_ctx->pri_path.mgid_index >= num_gids)
3183 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3184 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3185 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3186 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3189 if (qp_ctx->alt_path.mgid_index >= num_gids)
3199 case MLX4_QP_ST_MLX:
3200 qpn = vhcr->in_modifier & 0x7fffff;
3201 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3202 if (transition == QP_TRANS_INIT2RTR &&
3203 slave != mlx4_master_func_num(dev) &&
3204 mlx4_is_qp_reserved(dev, qpn) &&
3205 !mlx4_vf_smi_enabled(dev, slave, port)) {
3206 /* only enabled VFs may create MLX proxy QPs */
3207 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3208 __func__, slave, port);
3220 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3221 struct mlx4_vhcr *vhcr,
3222 struct mlx4_cmd_mailbox *inbox,
3223 struct mlx4_cmd_mailbox *outbox,
3224 struct mlx4_cmd_info *cmd)
3226 struct mlx4_mtt mtt;
3227 __be64 *page_list = inbox->buf;
3228 u64 *pg_list = (u64 *)page_list;
3230 struct res_mtt *rmtt = NULL;
3231 int start = be64_to_cpu(page_list[0]);
3232 int npages = vhcr->in_modifier;
3235 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3239 /* Call the SW implementation of write_mtt:
3240 * - Prepare a dummy mtt struct
3241 * - Translate inbox contents to simple addresses in host endianness */
3242 mtt.offset = 0; /* TBD: the offset is not handled here, since it is
3243 not actually used on this path */
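/* Bit 0 of each guest-supplied address is the MTT 'present' flag; clear it
 * here, as the SW write path sets it again when writing the entries. */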
3246 for (i = 0; i < npages; ++i)
3247 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3249 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3250 ((u64 *)page_list + 2));
3253 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3258 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3259 struct mlx4_vhcr *vhcr,
3260 struct mlx4_cmd_mailbox *inbox,
3261 struct mlx4_cmd_mailbox *outbox,
3262 struct mlx4_cmd_info *cmd)
3264 int eqn = vhcr->in_modifier;
3265 int res_id = eqn | (slave << 10);
3269 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3273 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3277 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3281 atomic_dec(&eq->mtt->ref_count);
3282 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3283 res_end_move(dev, slave, RES_EQ, res_id);
3284 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3289 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3291 res_abort_move(dev, slave, RES_EQ, res_id);
3296 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3298 struct mlx4_priv *priv = mlx4_priv(dev);
3299 struct mlx4_slave_event_eq_info *event_eq;
3300 struct mlx4_cmd_mailbox *mailbox;
3301 u32 in_modifier = 0;
3306 if (!priv->mfunc.master.slave_state)
3309 /* check that the slave is valid, is not the PF, and is active */
3310 if (slave < 0 || slave > dev->persist->num_vfs ||
3311 slave == dev->caps.function ||
3312 !priv->mfunc.master.slave_state[slave].active)
3315 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3317 /* Create the event only if the slave is registered */
3318 if (event_eq->eqn < 0)
3321 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3322 res_id = (slave << 10) | event_eq->eqn;
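/* EQs are tracked per slave: the tracker id packs the slave number above
 * the 10-bit EQN. */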
3323 err = get_res(dev, slave, res_id, RES_EQ, &req);
3327 if (req->com.from_state != RES_EQ_HW) {
3332 mailbox = mlx4_alloc_cmd_mailbox(dev);
3333 if (IS_ERR(mailbox)) {
3334 err = PTR_ERR(mailbox);
3338 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3340 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3343 memcpy(mailbox->buf, (u8 *) eqe, 28);
3345 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3347 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3348 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3351 put_res(dev, slave, res_id, RES_EQ);
3352 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3353 mlx4_free_cmd_mailbox(dev, mailbox);
3357 put_res(dev, slave, res_id, RES_EQ);
3360 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3364 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3365 struct mlx4_vhcr *vhcr,
3366 struct mlx4_cmd_mailbox *inbox,
3367 struct mlx4_cmd_mailbox *outbox,
3368 struct mlx4_cmd_info *cmd)
3370 int eqn = vhcr->in_modifier;
3371 int res_id = eqn | (slave << 10);
3375 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3379 if (eq->com.from_state != RES_EQ_HW) {
3384 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3387 put_res(dev, slave, res_id, RES_EQ);
3391 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3392 struct mlx4_vhcr *vhcr,
3393 struct mlx4_cmd_mailbox *inbox,
3394 struct mlx4_cmd_mailbox *outbox,
3395 struct mlx4_cmd_info *cmd)
3398 int cqn = vhcr->in_modifier;
3399 struct mlx4_cq_context *cqc = inbox->buf;
3400 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3401 struct res_cq *cq = NULL;
3402 struct res_mtt *mtt;
3404 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3407 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3410 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3413 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3416 atomic_inc(&mtt->ref_count);
3418 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3419 res_end_move(dev, slave, RES_CQ, cqn);
3423 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3425 res_abort_move(dev, slave, RES_CQ, cqn);
3429 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3430 struct mlx4_vhcr *vhcr,
3431 struct mlx4_cmd_mailbox *inbox,
3432 struct mlx4_cmd_mailbox *outbox,
3433 struct mlx4_cmd_info *cmd)
3436 int cqn = vhcr->in_modifier;
3437 struct res_cq *cq = NULL;
3439 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3442 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3445 atomic_dec(&cq->mtt->ref_count);
3446 res_end_move(dev, slave, RES_CQ, cqn);
3450 res_abort_move(dev, slave, RES_CQ, cqn);
3454 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3455 struct mlx4_vhcr *vhcr,
3456 struct mlx4_cmd_mailbox *inbox,
3457 struct mlx4_cmd_mailbox *outbox,
3458 struct mlx4_cmd_info *cmd)
3460 int cqn = vhcr->in_modifier;
3464 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3468 if (cq->com.from_state != RES_CQ_HW)
3471 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3473 put_res(dev, slave, cqn, RES_CQ);
3478 static int handle_resize(struct mlx4_dev *dev, int slave,
3479 struct mlx4_vhcr *vhcr,
3480 struct mlx4_cmd_mailbox *inbox,
3481 struct mlx4_cmd_mailbox *outbox,
3482 struct mlx4_cmd_info *cmd,
3486 struct res_mtt *orig_mtt;
3487 struct res_mtt *mtt;
3488 struct mlx4_cq_context *cqc = inbox->buf;
3489 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
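/* A CQ resize points the CQ at a new MTT range: sanity-check the old range,
 * validate the new one, run the firmware command, then move the MTT
 * reference from the old range to the new one. */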
3491 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3495 if (orig_mtt != cq->mtt) {
3500 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3504 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3507 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3510 atomic_dec(&orig_mtt->ref_count);
3511 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3512 atomic_inc(&mtt->ref_count);
3514 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3518 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3520 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3526 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3527 struct mlx4_vhcr *vhcr,
3528 struct mlx4_cmd_mailbox *inbox,
3529 struct mlx4_cmd_mailbox *outbox,
3530 struct mlx4_cmd_info *cmd)
3532 int cqn = vhcr->in_modifier;
3536 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3540 if (cq->com.from_state != RES_CQ_HW)
3543 if (vhcr->op_modifier == 0) {
3544 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3548 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3550 put_res(dev, slave, cqn, RES_CQ);
3555 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3557 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3558 int log_rq_stride = srqc->logstride & 7;
3559 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3561 if (log_srq_size + log_rq_stride + 4 < page_shift)
3564 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3567 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3568 struct mlx4_vhcr *vhcr,
3569 struct mlx4_cmd_mailbox *inbox,
3570 struct mlx4_cmd_mailbox *outbox,
3571 struct mlx4_cmd_info *cmd)
3574 int srqn = vhcr->in_modifier;
3575 struct res_mtt *mtt;
3576 struct res_srq *srq = NULL;
3577 struct mlx4_srq_context *srqc = inbox->buf;
3578 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3580 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3583 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3586 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3589 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3594 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3598 atomic_inc(&mtt->ref_count);
3600 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3601 res_end_move(dev, slave, RES_SRQ, srqn);
3605 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3607 res_abort_move(dev, slave, RES_SRQ, srqn);
3612 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3613 struct mlx4_vhcr *vhcr,
3614 struct mlx4_cmd_mailbox *inbox,
3615 struct mlx4_cmd_mailbox *outbox,
3616 struct mlx4_cmd_info *cmd)
3619 int srqn = vhcr->in_modifier;
3620 struct res_srq *srq = NULL;
3622 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3625 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3628 atomic_dec(&srq->mtt->ref_count);
3630 atomic_dec(&srq->cq->ref_count);
3631 res_end_move(dev, slave, RES_SRQ, srqn);
3636 res_abort_move(dev, slave, RES_SRQ, srqn);
3641 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3642 struct mlx4_vhcr *vhcr,
3643 struct mlx4_cmd_mailbox *inbox,
3644 struct mlx4_cmd_mailbox *outbox,
3645 struct mlx4_cmd_info *cmd)
3648 int srqn = vhcr->in_modifier;
3649 struct res_srq *srq;
3651 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3654 if (srq->com.from_state != RES_SRQ_HW) {
3658 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3660 put_res(dev, slave, srqn, RES_SRQ);
3664 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3665 struct mlx4_vhcr *vhcr,
3666 struct mlx4_cmd_mailbox *inbox,
3667 struct mlx4_cmd_mailbox *outbox,
3668 struct mlx4_cmd_info *cmd)
3671 int srqn = vhcr->in_modifier;
3672 struct res_srq *srq;
3674 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3678 if (srq->com.from_state != RES_SRQ_HW) {
3683 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3685 put_res(dev, slave, srqn, RES_SRQ);
3689 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3690 struct mlx4_vhcr *vhcr,
3691 struct mlx4_cmd_mailbox *inbox,
3692 struct mlx4_cmd_mailbox *outbox,
3693 struct mlx4_cmd_info *cmd)
3696 int qpn = vhcr->in_modifier & 0x7fffff;
3699 err = get_res(dev, slave, qpn, RES_QP, &qp);
3702 if (qp->com.from_state != RES_QP_HW) {
3707 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3709 put_res(dev, slave, qpn, RES_QP);
3713 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3714 struct mlx4_vhcr *vhcr,
3715 struct mlx4_cmd_mailbox *inbox,
3716 struct mlx4_cmd_mailbox *outbox,
3717 struct mlx4_cmd_info *cmd)
3719 struct mlx4_qp_context *context = inbox->buf + 8;
3720 adjust_proxy_tun_qkey(dev, vhcr, context);
3721 update_pkey_index(dev, slave, inbox);
3722 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3725 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3726 struct mlx4_qp_context *qpc,
3727 struct mlx4_cmd_mailbox *inbox)
3729 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3731 int port = mlx4_slave_convert_port(
3732 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
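/* Bit 6 of sched_queue selects the physical port; rewrite it with the port
 * this slave is actually mapped to. */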
3737 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3740 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3741 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3742 qpc->pri_path.sched_queue = pri_sched_queue;
3745 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3746 port = mlx4_slave_convert_port(
3747 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3751 qpc->alt_path.sched_queue =
3752 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3758 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3759 struct mlx4_qp_context *qpc,
3760 struct mlx4_cmd_mailbox *inbox)
3764 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3765 u8 sched = *(u8 *)(inbox->buf + 64);
3768 port = (sched >> 6 & 1) + 1;
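/* For Ethernet (RoCE) QPs the source-MAC index in the primary path must
 * resolve to a MAC registered by this slave on that port. */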
3769 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3770 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3771 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3777 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3778 struct mlx4_vhcr *vhcr,
3779 struct mlx4_cmd_mailbox *inbox,
3780 struct mlx4_cmd_mailbox *outbox,
3781 struct mlx4_cmd_info *cmd)
3784 struct mlx4_qp_context *qpc = inbox->buf + 8;
3785 int qpn = vhcr->in_modifier & 0x7fffff;
3787 u8 orig_sched_queue;
3788 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3789 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3790 u8 orig_pri_path_fl = qpc->pri_path.fl;
3791 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3792 u8 orig_feup = qpc->pri_path.feup;
3794 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3797 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3801 if (roce_verify_mac(dev, slave, qpc, inbox))
3804 update_pkey_index(dev, slave, inbox);
3805 update_gid(dev, inbox, (u8)slave);
3806 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3807 orig_sched_queue = qpc->pri_path.sched_queue;
3809 err = get_res(dev, slave, qpn, RES_QP, &qp);
3812 if (qp->com.from_state != RES_QP_HW) {
3817 err = update_vport_qp_param(dev, inbox, slave, qpn);
3821 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3823 /* if no error, save sched queue value passed in by VF. This is
3824 * essentially the QoS value provided by the VF. This will be useful
3825 * if we allow dynamic changes from VST back to VGT
3828 qp->sched_queue = orig_sched_queue;
3829 qp->vlan_control = orig_vlan_control;
3830 qp->fvl_rx = orig_fvl_rx;
3831 qp->pri_path_fl = orig_pri_path_fl;
3832 qp->vlan_index = orig_vlan_index;
3833 qp->feup = orig_feup;
3835 put_res(dev, slave, qpn, RES_QP);
3839 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3840 struct mlx4_vhcr *vhcr,
3841 struct mlx4_cmd_mailbox *inbox,
3842 struct mlx4_cmd_mailbox *outbox,
3843 struct mlx4_cmd_info *cmd)
3846 struct mlx4_qp_context *context = inbox->buf + 8;
3848 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3851 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3855 update_pkey_index(dev, slave, inbox);
3856 update_gid(dev, inbox, (u8)slave);
3857 adjust_proxy_tun_qkey(dev, vhcr, context);
3858 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3861 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3862 struct mlx4_vhcr *vhcr,
3863 struct mlx4_cmd_mailbox *inbox,
3864 struct mlx4_cmd_mailbox *outbox,
3865 struct mlx4_cmd_info *cmd)
3868 struct mlx4_qp_context *context = inbox->buf + 8;
3870 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3873 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3877 update_pkey_index(dev, slave, inbox);
3878 update_gid(dev, inbox, (u8)slave);
3879 adjust_proxy_tun_qkey(dev, vhcr, context);
3880 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3884 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3885 struct mlx4_vhcr *vhcr,
3886 struct mlx4_cmd_mailbox *inbox,
3887 struct mlx4_cmd_mailbox *outbox,
3888 struct mlx4_cmd_info *cmd)
3890 struct mlx4_qp_context *context = inbox->buf + 8;
3891 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3894 adjust_proxy_tun_qkey(dev, vhcr, context);
3895 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3898 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3899 struct mlx4_vhcr *vhcr,
3900 struct mlx4_cmd_mailbox *inbox,
3901 struct mlx4_cmd_mailbox *outbox,
3902 struct mlx4_cmd_info *cmd)
3905 struct mlx4_qp_context *context = inbox->buf + 8;
3907 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3910 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3914 adjust_proxy_tun_qkey(dev, vhcr, context);
3915 update_gid(dev, inbox, (u8)slave);
3916 update_pkey_index(dev, slave, inbox);
3917 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3920 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3921 struct mlx4_vhcr *vhcr,
3922 struct mlx4_cmd_mailbox *inbox,
3923 struct mlx4_cmd_mailbox *outbox,
3924 struct mlx4_cmd_info *cmd)
3927 struct mlx4_qp_context *context = inbox->buf + 8;
3929 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3932 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3936 adjust_proxy_tun_qkey(dev, vhcr, context);
3937 update_gid(dev, inbox, (u8)slave);
3938 update_pkey_index(dev, slave, inbox);
3939 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3942 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3943 struct mlx4_vhcr *vhcr,
3944 struct mlx4_cmd_mailbox *inbox,
3945 struct mlx4_cmd_mailbox *outbox,
3946 struct mlx4_cmd_info *cmd)
3949 int qpn = vhcr->in_modifier & 0x7fffff;
3952 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3955 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3959 atomic_dec(&qp->mtt->ref_count);
3960 atomic_dec(&qp->rcq->ref_count);
3961 atomic_dec(&qp->scq->ref_count);
3963 atomic_dec(&qp->srq->ref_count);
3964 res_end_move(dev, slave, RES_QP, qpn);
3968 res_abort_move(dev, slave, RES_QP, qpn);
3973 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3974 struct res_qp *rqp, u8 *gid)
3976 struct res_gid *res;
3978 list_for_each_entry(res, &rqp->mcg_list, list) {
3979 if (!memcmp(res->gid, gid, 16))
3985 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3986 u8 *gid, enum mlx4_protocol prot,
3987 enum mlx4_steer_type steer, u64 reg_id)
3989 struct res_gid *res;
3992 res = kzalloc(sizeof *res, GFP_KERNEL);
3996 spin_lock_irq(&rqp->mcg_spl);
3997 if (find_gid(dev, slave, rqp, gid)) {
4001 memcpy(res->gid, gid, 16);
4004 res->reg_id = reg_id;
4005 list_add_tail(&res->list, &rqp->mcg_list);
4008 spin_unlock_irq(&rqp->mcg_spl);
4013 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4014 u8 *gid, enum mlx4_protocol prot,
4015 enum mlx4_steer_type steer, u64 *reg_id)
4017 struct res_gid *res;
4020 spin_lock_irq(&rqp->mcg_spl);
4021 res = find_gid(dev, slave, rqp, gid);
4022 if (!res || res->prot != prot || res->steer != steer)
4025 *reg_id = res->reg_id;
4026 list_del(&res->list);
4030 spin_unlock_irq(&rqp->mcg_spl);
4035 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4036 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4037 enum mlx4_steer_type type, u64 *reg_id)
4039 switch (dev->caps.steering_mode) {
4040 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4041 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4044 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4045 block_loopback, prot,
4048 case MLX4_STEERING_MODE_B0:
4049 if (prot == MLX4_PROT_ETH) {
4050 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4055 return mlx4_qp_attach_common(dev, qp, gid,
4056 block_loopback, prot, type);
4062 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4063 u8 gid[16], enum mlx4_protocol prot,
4064 enum mlx4_steer_type type, u64 reg_id)
4066 switch (dev->caps.steering_mode) {
4067 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4068 return mlx4_flow_detach(dev, reg_id);
4069 case MLX4_STEERING_MODE_B0:
4070 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4076 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4077 u8 *gid, enum mlx4_protocol prot)
4081 if (prot != MLX4_PROT_ETH)
4084 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4085 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4086 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4095 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4096 struct mlx4_vhcr *vhcr,
4097 struct mlx4_cmd_mailbox *inbox,
4098 struct mlx4_cmd_mailbox *outbox,
4099 struct mlx4_cmd_info *cmd)
4101 struct mlx4_qp qp; /* dummy for calling attach/detach */
4102 u8 *gid = inbox->buf;
4103 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4108 int attach = vhcr->op_modifier;
4109 int block_loopback = vhcr->in_modifier >> 31;
4110 u8 steer_type_mask = 2;
4111 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
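/* The in_modifier packs the QPN (bits 23:0), the protocol (bits 30:28) and
 * block-loopback (bit 31); bit 1 of gid[7] selects the steering type. */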
4113 qpn = vhcr->in_modifier & 0xffffff;
4114 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4120 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4123 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4126 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4130 err = mlx4_adjust_port(dev, slave, gid, prot);
4134 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4138 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4140 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4143 put_res(dev, slave, qpn, RES_QP);
4147 qp_detach(dev, &qp, gid, prot, type, reg_id);
4149 put_res(dev, slave, qpn, RES_QP);
4154 * MAC validation for Flow Steering rules.
4155 * A VF may attach rules only with a MAC address that is assigned to it.
4157 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4158 struct list_head *rlist)
4160 struct mac_res *res, *tmp;
4163 /* make sure it isn't a multicast or broadcast MAC */
4164 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4165 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4166 list_for_each_entry_safe(res, tmp, rlist, list) {
4167 be_mac = cpu_to_be64(res->mac << 16);
4168 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4171 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4172 eth_header->eth.dst_mac, slave);
4179 * If the eth header is missing, prepend an eth header carrying a MAC address
4180 * assigned to the VF.
4182 static int add_eth_header(struct mlx4_dev *dev, int slave,
4183 struct mlx4_cmd_mailbox *inbox,
4184 struct list_head *rlist, int header_id)
4186 struct mac_res *res, *tmp;
4188 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4189 struct mlx4_net_trans_rule_hw_eth *eth_header;
4190 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4191 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4193 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4195 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4197 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4199 /* Clear a space in the inbox for eth header */
4200 switch (header_id) {
4201 case MLX4_NET_TRANS_RULE_ID_IPV4:
4203 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4204 memmove(ip_header, eth_header,
4205 sizeof(*ip_header) + sizeof(*l4_header));
4207 case MLX4_NET_TRANS_RULE_ID_TCP:
4208 case MLX4_NET_TRANS_RULE_ID_UDP:
4209 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4211 memmove(l4_header, eth_header, sizeof(*l4_header));
4216 list_for_each_entry_safe(res, tmp, rlist, list) {
4217 if (port == res->port) {
4218 be_mac = cpu_to_be64(res->mac << 16);
4223 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4228 memset(eth_header, 0, sizeof(*eth_header));
4229 eth_header->size = sizeof(*eth_header) >> 2;
4230 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4231 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4232 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4238 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4239 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4240 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
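/* Only the MAC index and the source-check-on-multicast-loopback bits may be
 * changed through the wrapped UPDATE_QP; any other mask bit is rejected below. */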
4241 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4242 struct mlx4_vhcr *vhcr,
4243 struct mlx4_cmd_mailbox *inbox,
4244 struct mlx4_cmd_mailbox *outbox,
4245 struct mlx4_cmd_info *cmd_info)
4248 u32 qpn = vhcr->in_modifier & 0xffffff;
4252 u64 pri_addr_path_mask;
4253 struct mlx4_update_qp_context *cmd;
4256 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4258 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4259 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4260 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4263 if ((pri_addr_path_mask &
4264 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4265 !(dev->caps.flags2 &
4266 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4267 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4272 /* Just change the smac for the QP */
4273 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4275 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4279 port = (rqp->sched_queue >> 6 & 1) + 1;
4281 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4282 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4283 err = mac_find_smac_ix_in_slave(dev, slave, port,
4287 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4293 err = mlx4_cmd(dev, inbox->dma,
4294 vhcr->in_modifier, 0,
4295 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4298 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4303 put_res(dev, slave, qpn, RES_QP);
4307 static u32 qp_attach_mbox_size(void *mbox)
4309 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4310 struct _rule_hw *rule_header;
4312 rule_header = (struct _rule_hw *)(mbox + size);
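/* Walk the chain of rule specification headers that follows the control
 * segment; each header reports its own size in dwords, and a zero size
 * terminates the chain. */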
4314 while (rule_header->size) {
4315 size += rule_header->size * sizeof(u32);
4321 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4323 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4324 struct mlx4_vhcr *vhcr,
4325 struct mlx4_cmd_mailbox *inbox,
4326 struct mlx4_cmd_mailbox *outbox,
4327 struct mlx4_cmd_info *cmd)
4330 struct mlx4_priv *priv = mlx4_priv(dev);
4331 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4332 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4336 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4337 struct _rule_hw *rule_header;
4339 struct res_fs_rule *rrule;
4342 if (dev->caps.steering_mode !=
4343 MLX4_STEERING_MODE_DEVICE_MANAGED)
4346 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4347 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4351 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4352 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4354 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4357 rule_header = (struct _rule_hw *)(ctrl + 1);
4358 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4360 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4361 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4363 switch (header_id) {
4364 case MLX4_NET_TRANS_RULE_ID_ETH:
4365 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4370 case MLX4_NET_TRANS_RULE_ID_IB:
4372 case MLX4_NET_TRANS_RULE_ID_IPV4:
4373 case MLX4_NET_TRANS_RULE_ID_TCP:
4374 case MLX4_NET_TRANS_RULE_ID_UDP:
4375 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4376 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4380 vhcr->in_modifier +=
4381 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4384 pr_err("Corrupted mailbox\n");
4389 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4390 vhcr->in_modifier, 0,
4391 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4397 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4399 mlx4_err(dev, "Fail to add flow steering resources\n");
4403 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4407 mbox_size = qp_attach_mbox_size(inbox->buf);
4408 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4409 if (!rrule->mirr_mbox) {
4413 rrule->mirr_mbox_size = mbox_size;
4414 rrule->mirr_rule_id = 0;
4415 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4417 /* flip the port in the saved copy so the mirror rule targets the other port */
4418 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4419 if (ctrl->port == 1)
4424 if (mlx4_is_bonded(dev))
4425 mlx4_do_mirror_rule(dev, rrule);
4427 atomic_inc(&rqp->ref_count);
4430 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4432 /* detach rule on error */
4434 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4435 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4438 put_res(dev, slave, qpn, RES_QP);
4442 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4446 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4448 mlx4_err(dev, "Fail to remove flow steering resources\n");
4452 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4453 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4457 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4458 struct mlx4_vhcr *vhcr,
4459 struct mlx4_cmd_mailbox *inbox,
4460 struct mlx4_cmd_mailbox *outbox,
4461 struct mlx4_cmd_info *cmd)
4465 struct res_fs_rule *rrule;
4469 if (dev->caps.steering_mode !=
4470 MLX4_STEERING_MODE_DEVICE_MANAGED)
4473 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4477 if (!rrule->mirr_mbox) {
4478 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4479 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4482 mirr_reg_id = rrule->mirr_rule_id;
4483 kfree(rrule->mirr_mbox);
4486 /* Release the rule from busy state before removal */
4487 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4488 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4492 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4493 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4495 mlx4_err(dev, "Fail to get resource of mirror rule\n");
4497 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4498 mlx4_undo_mirror_rule(dev, rrule);
4501 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4503 mlx4_err(dev, "Fail to remove flow steering resources\n");
4507 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4508 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4511 atomic_dec(&rqp->ref_count);
4513 put_res(dev, slave, qpn, RES_QP);
4518 BUSY_MAX_RETRIES = 10
4521 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4522 struct mlx4_vhcr *vhcr,
4523 struct mlx4_cmd_mailbox *inbox,
4524 struct mlx4_cmd_mailbox *outbox,
4525 struct mlx4_cmd_info *cmd)
4528 int index = vhcr->in_modifier & 0xffff;
4530 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4534 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4535 put_res(dev, slave, index, RES_COUNTER);
4539 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4541 struct res_gid *rgid;
4542 struct res_gid *tmp;
4543 struct mlx4_qp qp; /* dummy for calling attach/detach */
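/* Tear down every steering attachment still recorded for this QP, using
 * whichever steering mode the device is running in. */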
4545 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4546 switch (dev->caps.steering_mode) {
4547 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4548 mlx4_flow_detach(dev, rgid->reg_id);
4550 case MLX4_STEERING_MODE_B0:
4551 qp.qpn = rqp->local_qpn;
4552 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4553 rgid->prot, rgid->steer);
4556 list_del(&rgid->list);
4561 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4562 enum mlx4_resource type, int print)
4564 struct mlx4_priv *priv = mlx4_priv(dev);
4565 struct mlx4_resource_tracker *tracker =
4566 &priv->mfunc.master.res_tracker;
4567 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4568 struct res_common *r;
4569 struct res_common *tmp;
4573 spin_lock_irq(mlx4_tlock(dev));
4574 list_for_each_entry_safe(r, tmp, rlist, list) {
4575 if (r->owner == slave) {
4577 if (r->state == RES_ANY_BUSY) {
4580 "%s id 0x%llx is busy\n",
4585 r->from_state = r->state;
4586 r->state = RES_ANY_BUSY;
4592 spin_unlock_irq(mlx4_tlock(dev));
4597 static int move_all_busy(struct mlx4_dev *dev, int slave,
4598 enum mlx4_resource type)
4600 unsigned long begin;
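/* Retry for up to five seconds, trying to mark every resource owned by the
 * slave as busy; only the final pass prints the resources that are stuck. */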
4605 busy = _move_all_busy(dev, slave, type, 0);
4606 if (time_after(jiffies, begin + 5 * HZ))
4613 busy = _move_all_busy(dev, slave, type, 1);
4617 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4619 struct mlx4_priv *priv = mlx4_priv(dev);
4620 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4621 struct list_head *qp_list =
4622 &tracker->slave_list[slave].res_list[RES_QP];
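/* Walk each QP owned by the slave back down its state ladder:
 * HW -> MAPPED (move the QP to reset), MAPPED -> RESERVED (free its ICM),
 * RESERVED -> gone (release the QPN range). */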
4630 err = move_all_busy(dev, slave, RES_QP);
4632 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4635 spin_lock_irq(mlx4_tlock(dev));
4636 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4637 spin_unlock_irq(mlx4_tlock(dev));
4638 if (qp->com.owner == slave) {
4639 qpn = qp->com.res_id;
4640 detach_qp(dev, slave, qp);
4641 state = qp->com.from_state;
4642 while (state != 0) {
4644 case RES_QP_RESERVED:
4645 spin_lock_irq(mlx4_tlock(dev));
4646 rb_erase(&qp->com.node,
4647 &tracker->res_tree[RES_QP]);
4648 list_del(&qp->com.list);
4649 spin_unlock_irq(mlx4_tlock(dev));
4650 if (!valid_reserved(dev, slave, qpn)) {
4651 __mlx4_qp_release_range(dev, qpn, 1);
4652 mlx4_release_resource(dev, slave,
4659 if (!valid_reserved(dev, slave, qpn))
4660 __mlx4_qp_free_icm(dev, qpn);
4661 state = RES_QP_RESERVED;
4665 err = mlx4_cmd(dev, in_param,
4668 MLX4_CMD_TIME_CLASS_A,
4671 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4672 slave, qp->local_qpn);
4673 atomic_dec(&qp->rcq->ref_count);
4674 atomic_dec(&qp->scq->ref_count);
4675 atomic_dec(&qp->mtt->ref_count);
4677 atomic_dec(&qp->srq->ref_count);
4678 state = RES_QP_MAPPED;
4685 spin_lock_irq(mlx4_tlock(dev));
4687 spin_unlock_irq(mlx4_tlock(dev));
4690 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4692 struct mlx4_priv *priv = mlx4_priv(dev);
4693 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4694 struct list_head *srq_list =
4695 &tracker->slave_list[slave].res_list[RES_SRQ];
4696 struct res_srq *srq;
4697 struct res_srq *tmp;
4704 err = move_all_busy(dev, slave, RES_SRQ);
4706 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4709 spin_lock_irq(mlx4_tlock(dev));
4710 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4711 spin_unlock_irq(mlx4_tlock(dev));
4712 if (srq->com.owner == slave) {
4713 srqn = srq->com.res_id;
4714 state = srq->com.from_state;
4715 while (state != 0) {
4717 case RES_SRQ_ALLOCATED:
4718 __mlx4_srq_free_icm(dev, srqn);
4719 spin_lock_irq(mlx4_tlock(dev));
4720 rb_erase(&srq->com.node,
4721 &tracker->res_tree[RES_SRQ]);
4722 list_del(&srq->com.list);
4723 spin_unlock_irq(mlx4_tlock(dev));
4724 mlx4_release_resource(dev, slave,
4732 err = mlx4_cmd(dev, in_param, srqn, 1,
4734 MLX4_CMD_TIME_CLASS_A,
4737 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4740 atomic_dec(&srq->mtt->ref_count);
4742 atomic_dec(&srq->cq->ref_count);
4743 state = RES_SRQ_ALLOCATED;
4751 spin_lock_irq(mlx4_tlock(dev));
4753 spin_unlock_irq(mlx4_tlock(dev));
4756 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4758 struct mlx4_priv *priv = mlx4_priv(dev);
4759 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4760 struct list_head *cq_list =
4761 &tracker->slave_list[slave].res_list[RES_CQ];
4770 err = move_all_busy(dev, slave, RES_CQ);
4772 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4775 spin_lock_irq(mlx4_tlock(dev));
4776 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4777 spin_unlock_irq(mlx4_tlock(dev));
4778 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4779 cqn = cq->com.res_id;
4780 state = cq->com.from_state;
4781 while (state != 0) {
4783 case RES_CQ_ALLOCATED:
4784 __mlx4_cq_free_icm(dev, cqn);
4785 spin_lock_irq(mlx4_tlock(dev));
4786 rb_erase(&cq->com.node,
4787 &tracker->res_tree[RES_CQ]);
4788 list_del(&cq->com.list);
4789 spin_unlock_irq(mlx4_tlock(dev));
4790 mlx4_release_resource(dev, slave,
4798 err = mlx4_cmd(dev, in_param, cqn, 1,
4800 MLX4_CMD_TIME_CLASS_A,
4803 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4805 atomic_dec(&cq->mtt->ref_count);
4806 state = RES_CQ_ALLOCATED;
4814 spin_lock_irq(mlx4_tlock(dev));
4816 spin_unlock_irq(mlx4_tlock(dev));
4819 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4821 struct mlx4_priv *priv = mlx4_priv(dev);
4822 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4823 struct list_head *mpt_list =
4824 &tracker->slave_list[slave].res_list[RES_MPT];
4825 struct res_mpt *mpt;
4826 struct res_mpt *tmp;
4833 err = move_all_busy(dev, slave, RES_MPT);
4835 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4838 spin_lock_irq(mlx4_tlock(dev));
4839 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4840 spin_unlock_irq(mlx4_tlock(dev));
4841 if (mpt->com.owner == slave) {
4842 mptn = mpt->com.res_id;
4843 state = mpt->com.from_state;
4844 while (state != 0) {
4846 case RES_MPT_RESERVED:
4847 __mlx4_mpt_release(dev, mpt->key);
4848 spin_lock_irq(mlx4_tlock(dev));
4849 rb_erase(&mpt->com.node,
4850 &tracker->res_tree[RES_MPT]);
4851 list_del(&mpt->com.list);
4852 spin_unlock_irq(mlx4_tlock(dev));
4853 mlx4_release_resource(dev, slave,
4859 case RES_MPT_MAPPED:
4860 __mlx4_mpt_free_icm(dev, mpt->key);
4861 state = RES_MPT_RESERVED;
4866 err = mlx4_cmd(dev, in_param, mptn, 0,
4868 MLX4_CMD_TIME_CLASS_A,
4871 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4874 atomic_dec(&mpt->mtt->ref_count);
4875 state = RES_MPT_MAPPED;
4882 spin_lock_irq(mlx4_tlock(dev));
4884 spin_unlock_irq(mlx4_tlock(dev));
4887 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4889 struct mlx4_priv *priv = mlx4_priv(dev);
4890 struct mlx4_resource_tracker *tracker =
4891 &priv->mfunc.master.res_tracker;
4892 struct list_head *mtt_list =
4893 &tracker->slave_list[slave].res_list[RES_MTT];
4894 struct res_mtt *mtt;
4895 struct res_mtt *tmp;
4901 err = move_all_busy(dev, slave, RES_MTT);
4903 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4906 spin_lock_irq(mlx4_tlock(dev));
4907 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4908 spin_unlock_irq(mlx4_tlock(dev));
4909 if (mtt->com.owner == slave) {
4910 base = mtt->com.res_id;
4911 state = mtt->com.from_state;
4912 while (state != 0) {
4914 case RES_MTT_ALLOCATED:
4915 __mlx4_free_mtt_range(dev, base,
4917 spin_lock_irq(mlx4_tlock(dev));
4918 rb_erase(&mtt->com.node,
4919 &tracker->res_tree[RES_MTT]);
4920 list_del(&mtt->com.list);
4921 spin_unlock_irq(mlx4_tlock(dev));
4922 mlx4_release_resource(dev, slave, RES_MTT,
4923 1 << mtt->order, 0);
4933 spin_lock_irq(mlx4_tlock(dev));
4935 spin_unlock_irq(mlx4_tlock(dev));
4938 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4940 struct mlx4_cmd_mailbox *mailbox;
4942 struct res_fs_rule *mirr_rule;
4945 mailbox = mlx4_alloc_cmd_mailbox(dev);
4946 if (IS_ERR(mailbox))
4947 return PTR_ERR(mailbox);
4949 if (!fs_rule->mirr_mbox) {
4950 mlx4_err(dev, "rule mirroring mailbox is null\n");
4951 mlx4_free_cmd_mailbox(dev, mailbox);
4954 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4955 err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4956 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4958 mlx4_free_cmd_mailbox(dev, mailbox);
4963 err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4967 err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4971 fs_rule->mirr_rule_id = reg_id;
4972 mirr_rule->mirr_rule_id = 0;
4973 mirr_rule->mirr_mbox_size = 0;
4974 mirr_rule->mirr_mbox = NULL;
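/* The mirror rule carries no mirror mailbox of its own, so it is never
 * mirrored again and cannot be detached explicitly (see the DETACH wrapper). */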
4975 put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
4979 rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
4981 mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4982 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4987 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
4989 struct mlx4_priv *priv = mlx4_priv(dev);
4990 struct mlx4_resource_tracker *tracker =
4991 &priv->mfunc.master.res_tracker;
4992 struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
4994 struct res_fs_rule *fs_rule;
4996 LIST_HEAD(mirr_list);
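/* When bonding, collect the original rules (those holding a mirror mailbox)
 * so they can be mirrored onto the other port; when unbonding, collect the
 * mirror rules (no saved mailbox) so they can be undone. */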
4998 for (p = rb_first(root); p; p = rb_next(p)) {
4999 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5000 if ((bond && fs_rule->mirr_mbox_size) ||
5001 (!bond && !fs_rule->mirr_mbox_size))
5002 list_add_tail(&fs_rule->mirr_list, &mirr_list);
5005 list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5007 err += mlx4_do_mirror_rule(dev, fs_rule);
5009 err += mlx4_undo_mirror_rule(dev, fs_rule);
5014 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5016 return mlx4_mirror_fs_rules(dev, true);
5019 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5021 return mlx4_mirror_fs_rules(dev, false);
5024 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5026 struct mlx4_priv *priv = mlx4_priv(dev);
5027 struct mlx4_resource_tracker *tracker =
5028 &priv->mfunc.master.res_tracker;
5029 struct list_head *fs_rule_list =
5030 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5031 struct res_fs_rule *fs_rule;
5032 struct res_fs_rule *tmp;
5037 err = move_all_busy(dev, slave, RES_FS_RULE);
5039 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
5042 spin_lock_irq(mlx4_tlock(dev));
5043 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5044 spin_unlock_irq(mlx4_tlock(dev));
5045 if (fs_rule->com.owner == slave) {
5046 base = fs_rule->com.res_id;
5047 state = fs_rule->com.from_state;
5048 while (state != 0) {
5050 case RES_FS_RULE_ALLOCATED:
5052 err = mlx4_cmd(dev, base, 0, 0,
5053 MLX4_QP_FLOW_STEERING_DETACH,
5054 MLX4_CMD_TIME_CLASS_A,
5057 spin_lock_irq(mlx4_tlock(dev));
5058 rb_erase(&fs_rule->com.node,
5059 &tracker->res_tree[RES_FS_RULE]);
5060 list_del(&fs_rule->com.list);
5061 spin_unlock_irq(mlx4_tlock(dev));
5062 kfree(fs_rule->mirr_mbox);
5072 spin_lock_irq(mlx4_tlock(dev));
5074 spin_unlock_irq(mlx4_tlock(dev));
5077 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5079 struct mlx4_priv *priv = mlx4_priv(dev);
5080 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5081 struct list_head *eq_list =
5082 &tracker->slave_list[slave].res_list[RES_EQ];
5090 err = move_all_busy(dev, slave, RES_EQ);
5092 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5095 spin_lock_irq(mlx4_tlock(dev));
5096 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5097 spin_unlock_irq(mlx4_tlock(dev));
5098 if (eq->com.owner == slave) {
5099 eqn = eq->com.res_id;
5100 state = eq->com.from_state;
5101 while (state != 0) {
5103 case RES_EQ_RESERVED:
5104 spin_lock_irq(mlx4_tlock(dev));
5105 rb_erase(&eq->com.node,
5106 &tracker->res_tree[RES_EQ]);
5107 list_del(&eq->com.list);
5108 spin_unlock_irq(mlx4_tlock(dev));
5114 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5115 1, MLX4_CMD_HW2SW_EQ,
5116 MLX4_CMD_TIME_CLASS_A,
5119 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5120 slave, eqn & 0x3ff);
5121 atomic_dec(&eq->mtt->ref_count);
5122 state = RES_EQ_RESERVED;
5130 spin_lock_irq(mlx4_tlock(dev));
5132 spin_unlock_irq(mlx4_tlock(dev));
5135 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5137 struct mlx4_priv *priv = mlx4_priv(dev);
5138 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5139 struct list_head *counter_list =
5140 &tracker->slave_list[slave].res_list[RES_COUNTER];
5141 struct res_counter *counter;
5142 struct res_counter *tmp;
5144 int *counters_arr = NULL;
5147 err = move_all_busy(dev, slave, RES_COUNTER);
5149 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5152 counters_arr = kmalloc_array(dev->caps.max_counters,
5153 sizeof(*counters_arr), GFP_KERNEL);
5160 spin_lock_irq(mlx4_tlock(dev));
5161 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5162 if (counter->com.owner == slave) {
5163 counters_arr[i++] = counter->com.res_id;
5164 rb_erase(&counter->com.node,
5165 &tracker->res_tree[RES_COUNTER]);
5166 list_del(&counter->com.list);
5170 spin_unlock_irq(mlx4_tlock(dev));
5173 __mlx4_counter_free(dev, counters_arr[j++]);
5174 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5178 kfree(counters_arr);
5181 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5183 struct mlx4_priv *priv = mlx4_priv(dev);
5184 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5185 struct list_head *xrcdn_list =
5186 &tracker->slave_list[slave].res_list[RES_XRCD];
5187 struct res_xrcdn *xrcd;
5188 struct res_xrcdn *tmp;
5192 err = move_all_busy(dev, slave, RES_XRCD);
5194 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5197 spin_lock_irq(mlx4_tlock(dev));
5198 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5199 if (xrcd->com.owner == slave) {
5200 xrcdn = xrcd->com.res_id;
5201 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5202 list_del(&xrcd->com.list);
5204 __mlx4_xrcd_free(dev, xrcdn);
5207 spin_unlock_irq(mlx4_tlock(dev));
5210 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5212 struct mlx4_priv *priv = mlx4_priv(dev);
5213 mlx4_reset_roce_gids(dev, slave);
5214 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5215 rem_slave_vlans(dev, slave);
5216 rem_slave_macs(dev, slave);
5217 rem_slave_fs_rule(dev, slave);
5218 rem_slave_qps(dev, slave);
5219 rem_slave_srqs(dev, slave);
5220 rem_slave_cqs(dev, slave);
5221 rem_slave_mrs(dev, slave);
5222 rem_slave_eqs(dev, slave);
5223 rem_slave_mtts(dev, slave);
5224 rem_slave_counters(dev, slave);
5225 rem_slave_xrcdns(dev, slave);
5226 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5229 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5230 struct mlx4_vf_immed_vlan_work *work)
5232 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5233 ctx->qp_context.qos_vport = work->qos_vport;
5236 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5238 struct mlx4_vf_immed_vlan_work *work =
5239 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5240 struct mlx4_cmd_mailbox *mailbox;
5241 struct mlx4_update_qp_context *upd_context;
5242 struct mlx4_dev *dev = &work->priv->dev;
5243 struct mlx4_resource_tracker *tracker =
5244 &work->priv->mfunc.master.res_tracker;
5245 struct list_head *qp_list =
5246 &tracker->slave_list[work->slave].res_list[RES_QP];
5249 u64 qp_path_mask_vlan_ctrl =
5250 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5251 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5252 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5253 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5254 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5255 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5257 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5258 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5259 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5260 (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5261 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5262 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5263 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5264 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
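/* RC QPs get only the VLAN/scheduling path fields updated; all other
 * transports also get the TX/RX vlan-control blocks (see the per-QP mask
 * selection below). */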
5267 int port, errors = 0;
5270 if (mlx4_is_slave(dev)) {
5271 mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5276 mailbox = mlx4_alloc_cmd_mailbox(dev);
5277 if (IS_ERR(mailbox))
5279 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5280 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5281 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5282 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5283 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5284 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5285 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5286 else if (!work->vlan_id)
5287 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5288 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5289 else if (work->vlan_proto == htons(ETH_P_8021AD))
5290 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5291 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5292 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5293 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5294 else /* vst 802.1Q */
5295 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5296 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5297 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5299 upd_context = mailbox->buf;
5300 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5302 spin_lock_irq(mlx4_tlock(dev));
5303 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5304 spin_unlock_irq(mlx4_tlock(dev));
5305 if (qp->com.owner == work->slave) {
5306 if (qp->com.from_state != RES_QP_HW ||
5307 !qp->sched_queue || /* no INIT2RTR trans yet */
5308 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5309 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5310 spin_lock_irq(mlx4_tlock(dev));
5313 port = (qp->sched_queue >> 6 & 1) + 1;
5314 if (port != work->port) {
5315 spin_lock_irq(mlx4_tlock(dev));
5318 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5319 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5321 upd_context->primary_addr_path_mask =
5322 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5323 if (work->vlan_id == MLX4_VGT) {
5324 upd_context->qp_context.param3 = qp->param3;
5325 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5326 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5327 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5328 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5329 upd_context->qp_context.pri_path.feup = qp->feup;
5330 upd_context->qp_context.pri_path.sched_queue =
5333 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5334 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5335 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5336 upd_context->qp_context.pri_path.fvl_rx =
5337 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5338 upd_context->qp_context.pri_path.fl =
5339 qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5340 if (work->vlan_proto == htons(ETH_P_8021AD))
5341 upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5343 upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5344 upd_context->qp_context.pri_path.feup =
5345 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5346 upd_context->qp_context.pri_path.sched_queue =
5347 qp->sched_queue & 0xC7;
5348 upd_context->qp_context.pri_path.sched_queue |=
5349 ((work->qos & 0x7) << 3);
5351 if (dev->caps.flags2 &
5352 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5353 update_qos_vpp(upd_context, work);
5356 err = mlx4_cmd(dev, mailbox->dma,
5357 qp->local_qpn & 0xffffff,
5358 0, MLX4_CMD_UPDATE_QP,
5359 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5361 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5362 work->slave, port, qp->local_qpn, err);
5366 spin_lock_irq(mlx4_tlock(dev));
5368 spin_unlock_irq(mlx4_tlock(dev));
5369 mlx4_free_cmd_mailbox(dev, mailbox);
5372 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5373 errors, work->slave, work->port);
5375 /* unregister previous vlan_id if needed and we had no errors
5376 * while updating the QPs
5378 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5379 NO_INDX != work->orig_vlan_ix)
5380 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5381 work->orig_vlan_id);