/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"
#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head	list;
	struct rb_node		node;
	u64			res_id;
	int			owner;
	int			state;
	int			from_state;
	int			to_state;
	int			removing;
	const char		*func_name;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;
};
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *rcq;
	struct res_cq	       *scq;
	struct res_srq	       *srq;
	struct list_head	mcg_list;
	spinlock_t		mcg_spl;
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common	com;
	int			order;
	atomic_t		ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common	com;
	struct res_mtt	       *mtt;
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common	com;
	struct res_mtt	       *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	atomic_t		ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common	com;
	struct res_mtt	       *mtt;
	struct res_cq	       *cq;
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common	com;
	int			qpn;
	/* VF DMFS mbox with port flipped */
	void			*mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32			mirr_mbox_size;
	struct list_head	mirr_list;
	u64			mirr_rule_id;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = rb_entry(node, struct res_common,
						  node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = rb_entry(*new, struct res_common,
						   node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
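
/*
 * Usage sketch (illustrative only): every tracked object embeds a
 * struct res_common keyed by res_id, so a typical lookup/insert pair is
 *
 *	if (!res_tracker_lookup(root, id))
 *		err = res_tracker_insert(root, &res->com);
 *
 * Neither helper takes a lock itself; callers are expected to hold
 * mlx4_tlock(dev) around both calls.
 */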
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);

static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
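
/*
 * Worked example (illustrative): quota = 10, guaranteed = 4, allocated = 3,
 * count = 3.  allocated + count (6) exceeds guaranteed (4), so
 * from_free = 3 - (4 - 3) = 2 and from_rsvd = 1: one instance is debited
 * from the reserved pool and two from the shared free pool, which must
 * still hold at least 'reserved' entries afterwards or the grant fails.
 */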
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}
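
/*
 * Example (illustrative): with num_vfs = 2 and num_instances = 600, each
 * of the three functions (PF + 2 VFs) is guaranteed 600 / (2 * 3) = 100
 * instances and may allocate at most 300 + 100 = 400.  The guarantees
 * together consume half of the pool; the other half is shared on demand.
 */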
void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int
mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
				 struct resource_allocator *res_alloc,
				 int vf)
{
	struct mlx4_active_ports actv_ports;
	int ports, counters_guaranteed;

	/* For master, only allocate according to the number of phys ports */
	if (vf == mlx4_master_func_num(dev))
		return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;

	/* calculate real number of ports for the VF */
	actv_ports = mlx4_get_active_ports(dev, vf);
	ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;

	/* If we do not have enough counters for this VF, do not
	 * allocate any for it. '-1' to reduce the sink counter.
	 */
	if ((res_alloc->res_reserved + counters_guaranteed) >
	    (dev->caps.max_counters - 1))
		return 0;

	return counters_guaranteed;
}
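
/*
 * Example (illustrative): on a dual-port HCA the PF is guaranteed
 * 2 * 2 = 4 counters; a VF with one active port is guaranteed 1, unless
 * res_reserved plus that guarantee would exceed max_counters - 1 (one
 * counter stays outside the pool as the sink), in which case the VF gets
 * 0 guaranteed and later allocations use the shared pool or the sink.
 */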
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0 ; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				res_alloc->guaranteed[t] =
					mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}
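
/*
 * Note on the magic offsets above: byte 64 of the mailbox holds the QP
 * context's sched_queue byte (bit 6 selects the port, matching the
 * "(sched >> 6 & 1) + 1" computation used throughout this file) and
 * byte 35 holds the pkey index.  The slave's virtual pkey index is thus
 * translated in place through virt2phys_pkey before the command hits FW.
 */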
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
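
/*
 * Example (illustrative): if mlx4_get_base_gid_ix() returns 8 for this
 * slave and port, a UD QP gets mgid_index = 8 | 0x80, and an RC/UC/XRC
 * QP that asked for index 2 is rewritten to (8 + 2) & 0x7f = 10, so each
 * function only ever references its own slice of the shared GID table.
 */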
static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw sees it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}
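
/*
 * In short: when the admin pinned a VST vlan (default_vlan != MLX4_VGT),
 * the master rewrites the VF's INIT2RTR mailbox to force vlan insertion
 * and stripping, per-vport QoS, and, with spoof-checking enabled, the
 * forced source MAC, so the VF cannot opt out of any of these settings.
 */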
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}
static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
	switch (t) {
	case RES_QP: return "QP";
	case RES_CQ: return "CQ";
	case RES_SRQ: return "SRQ";
	case RES_XRCD: return "XRCD";
	case RES_MPT: return "MPT";
	case RES_MTT: return "MTT";
	case RES_MAC: return "MAC";
	case RES_VLAN: return "VLAN";
	case RES_EQ: return "EQ";
	case RES_COUNTER: return "COUNTER";
	case RES_FS_RULE: return "FS_RULE";
	default: return "INVALID RESOURCE";
	}
}
static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type,
		    void *res, const char *func_name)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		mlx4_warn(dev,
			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
			  func_name, slave, res_id, mlx4_resource_type_to_str(type),
			  r->func_name);
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	r->func_name = func_name;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

#define get_res(dev, slave, res_id, type, res) \
	_get_res((dev), (slave), (res_id), (type), (res), __func__)
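
/*
 * Typical call pattern (illustrative): take the resource, use it, put it
 * back.  While held, its state reads RES_ANY_BUSY, and concurrent takers
 * see -EBUSY along with the holder's name recorded in func_name:
 *
 *	err = get_res(dev, slave, cqn, RES_CQ, &cq);
 *	if (!err) {
 *		... use cq ...
 *		put_res(dev, slave, cqn, RES_CQ);
 *	}
 */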
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
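
/*
 * Note: a range is tracked as 'count' individual entries
 * (base .. base + count - 1), so a partial failure can unwind exactly the
 * entries already inserted; the 'extra' argument carries per-type data
 * such as the MTT order, the MPT key, or the counter's port.
 */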
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -EOPNOTSUPP;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
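
/*
 * Sketch of the move protocol (illustrative): a *_res_start_move_to()
 * validates the transition and parks the entry in its BUSY state, the
 * caller performs the FW command, and exactly one of the two helpers
 * above finishes the move:
 *
 *	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
 *	if (!err) {
 *		if (do_fw_command(dev))
 *			res_abort_move(dev, slave, RES_MPT, id); // back to from_state
 *		else
 *			res_end_move(dev, slave, RES_MPT, id);   // commit to_state
 *	}
 *
 * do_fw_command() is a stand-in for the real FW call, not a driver API.
 */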
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn) &&
		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);

	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
	if (err)
		return err;

	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		return -ENOMEM;
	}

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	} else {
		set_param_l(out_param, base);
	}

	return err;
}
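
/*
 * All the *_alloc_res() helpers follow the same three-step pattern seen
 * above: charge the slave's quota (mlx4_grant_resource), allocate the HW
 * resource, then record it in the tracker (add_res_range).  Each step
 * undoes the previous ones on failure, so quota accounting and HW state
 * never drift apart.
 */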
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
		if (err)
			break;

		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
		if (err)
			break;

		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			break;
		}

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
				     u8 smac_index, u64 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->smac_index == smac_index && res->port == (u8) port) {
			*mac = res->mac;
			return 0;
		}
	}
	return -ENOENT;
}

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the mac the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}
static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the vlan the num times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
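
/*
 * Decoding (as used above): the low byte of vhcr->in_modifier selects
 * the resource type, bits 8-15 optionally carry a port number for
 * MAC/VLAN, and vhcr->op_modifier carries the RES_OP_* opcode; results
 * travel back to the slave through vhcr->out_param.
 */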
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(
				dev, slave, port);

		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	if (index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}
static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
	int tot;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	tot = (total_mem + (page_offset << 6)) >> page_shift;
	total_pages = !tot ? 1 : roundup_pow_of_two(tot);

	return total_pages;
}
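
/*
 * Worked example (illustrative): log_sq_size = 6, log_sq_stride = 2,
 * no SRQ/RSS/XRC, log_rq_size = 4, log_rq_stride = 2, page_shift = 12,
 * page_offset = 0: sq_size = 1 << (6 + 2 + 4) = 4096, rq_size = 1 << 10
 * = 1024, tot = 5120 >> 12 = 1, so this QP needs one MTT page.
 */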
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt = NULL;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
2867 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2868 struct mlx4_vhcr *vhcr,
2869 struct mlx4_cmd_mailbox *inbox,
2870 struct mlx4_cmd_mailbox *outbox,
2871 struct mlx4_cmd_info *cmd)
2874 int index = vhcr->in_modifier;
2875 struct res_mpt *mpt;
2878 id = index & mpt_mask(dev);
2879 err = get_res(dev, slave, id, RES_MPT, &mpt);
2883 if (mpt->com.from_state == RES_MPT_MAPPED) {
2884 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2885 * that, the VF must read the MPT. But since the MPT entry memory is not
2886 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2887 * entry contents. To guarantee that the MPT cannot be changed, the driver
2888 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2889 	 * ownership following the change. The change here allows the VF to
2890 * perform QUERY_MPT also when the entry is in SW ownership.
2892 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2893 &mlx4_priv(dev)->mr_table.dmpt_table,
2896 if (NULL == mpt_entry || NULL == outbox->buf) {
2901 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2904 } else if (mpt->com.from_state == RES_MPT_HW) {
2905 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2913 put_res(dev, slave, id, RES_MPT);
2917 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2919 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2922 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2924 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2927 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2929 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2932 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2933 struct mlx4_qp_context *context)
2935 u32 qpn = vhcr->in_modifier & 0xffffff;
2938 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2941 /* adjust qkey in qp context */
2942 context->qkey = cpu_to_be32(qkey);
2945 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2946 struct mlx4_qp_context *qpc,
2947 struct mlx4_cmd_mailbox *inbox);
2949 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2950 struct mlx4_vhcr *vhcr,
2951 struct mlx4_cmd_mailbox *inbox,
2952 struct mlx4_cmd_mailbox *outbox,
2953 struct mlx4_cmd_info *cmd)
2956 int qpn = vhcr->in_modifier & 0x7fffff;
2957 struct res_mtt *mtt;
2959 struct mlx4_qp_context *qpc = inbox->buf + 8;
2960 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2961 int mtt_size = qp_get_mtt_size(qpc);
2964 int rcqn = qp_get_rcqn(qpc);
2965 int scqn = qp_get_scqn(qpc);
2966 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2967 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2968 struct res_srq *srq;
2969 int local_qpn = vhcr->in_modifier & 0xffffff;
2971 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2975 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2978 qp->local_qpn = local_qpn;
2979 qp->sched_queue = 0;
2981 qp->vlan_control = 0;
2983 qp->pri_path_fl = 0;
2986 qp->qpc_flags = be32_to_cpu(qpc->flags);
2988 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2992 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2996 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
3001 err = get_res(dev, slave, scqn, RES_CQ, &scq);
3008 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3013 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3014 update_pkey_index(dev, slave, inbox);
3015 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3018 atomic_inc(&mtt->ref_count);
3020 atomic_inc(&rcq->ref_count);
3022 atomic_inc(&scq->ref_count);
3026 put_res(dev, slave, scqn, RES_CQ);
3029 atomic_inc(&srq->ref_count);
3030 put_res(dev, slave, srqn, RES_SRQ);
3034 /* Save param3 for dynamic changes from VST back to VGT */
3035 qp->param3 = qpc->param3;
3036 put_res(dev, slave, rcqn, RES_CQ);
3037 put_res(dev, slave, mtt_base, RES_MTT);
3038 res_end_move(dev, slave, RES_QP, qpn);
3044 put_res(dev, slave, srqn, RES_SRQ);
3047 put_res(dev, slave, scqn, RES_CQ);
3049 put_res(dev, slave, rcqn, RES_CQ);
3051 put_res(dev, slave, mtt_base, RES_MTT);
3053 res_abort_move(dev, slave, RES_QP, qpn);
3058 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3060 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3063 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3065 int log_eq_size = eqc->log_eq_size & 0x1f;
3066 int page_shift = (eqc->log_page_size & 0x3f) + 12;
3068 if (log_eq_size + 5 < page_shift)
3071 return 1 << (log_eq_size + 5 - page_shift);
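/*
 * The "+ 5" above corresponds to 32-byte EQEs (log2 32 = 5).  Example
 * (illustrative numbers): log_eq_size = 10 describes 1024 EQEs = 32KB,
 * which with page_shift = 12 (4KB pages) needs 1 << (10 + 5 - 12) = 8
 * MTT entries.  CQs use the same formula below, since CQEs are also
 * 32 bytes.
 */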
3074 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3076 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3079 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3081 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3082 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3084 if (log_cq_size + 5 < page_shift)
3087 return 1 << (log_cq_size + 5 - page_shift);
3090 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3091 struct mlx4_vhcr *vhcr,
3092 struct mlx4_cmd_mailbox *inbox,
3093 struct mlx4_cmd_mailbox *outbox,
3094 struct mlx4_cmd_info *cmd)
3097 int eqn = vhcr->in_modifier;
3098 int res_id = (slave << 10) | eqn;
3099 struct mlx4_eq_context *eqc = inbox->buf;
3100 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3101 int mtt_size = eq_get_mtt_size(eqc);
3103 struct res_mtt *mtt;
3105 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3108 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3112 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3116 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3120 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3124 atomic_inc(&mtt->ref_count);
3126 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3127 res_end_move(dev, slave, RES_EQ, res_id);
3131 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3133 res_abort_move(dev, slave, RES_EQ, res_id);
3135 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3139 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3140 struct mlx4_vhcr *vhcr,
3141 struct mlx4_cmd_mailbox *inbox,
3142 struct mlx4_cmd_mailbox *outbox,
3143 struct mlx4_cmd_info *cmd)
3146 u8 get = vhcr->op_modifier;
3151 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3156 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3157 int len, struct res_mtt **res)
3159 struct mlx4_priv *priv = mlx4_priv(dev);
3160 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3161 struct res_mtt *mtt;
3164 spin_lock_irq(mlx4_tlock(dev));
3165 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3167 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3169 mtt->com.from_state = mtt->com.state;
3170 mtt->com.state = RES_MTT_BUSY;
3175 spin_unlock_irq(mlx4_tlock(dev));
3180 static int verify_qp_parameters(struct mlx4_dev *dev,
3181 struct mlx4_vhcr *vhcr,
3182 struct mlx4_cmd_mailbox *inbox,
3183 enum qp_transition transition, u8 slave)
3187 struct mlx4_qp_context *qp_ctx;
3188 enum mlx4_qp_optpar optpar;
3192 qp_ctx = inbox->buf + 8;
3193 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3194 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3196 if (slave != mlx4_master_func_num(dev)) {
3197 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3198 /* setting QP rate-limit is disallowed for VFs */
3199 if (qp_ctx->rate_limit_params)
3205 case MLX4_QP_ST_XRC:
3207 switch (transition) {
3208 case QP_TRANS_INIT2RTR:
3209 case QP_TRANS_RTR2RTS:
3210 case QP_TRANS_RTS2RTS:
3211 case QP_TRANS_SQD2SQD:
3212 case QP_TRANS_SQD2RTS:
3213 if (slave != mlx4_master_func_num(dev)) {
3214 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3215 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3216 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3217 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3220 if (qp_ctx->pri_path.mgid_index >= num_gids)
3223 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3224 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3225 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3226 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3229 if (qp_ctx->alt_path.mgid_index >= num_gids)
3239 case MLX4_QP_ST_MLX:
3240 qpn = vhcr->in_modifier & 0x7fffff;
3241 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3242 if (transition == QP_TRANS_INIT2RTR &&
3243 slave != mlx4_master_func_num(dev) &&
3244 mlx4_is_qp_reserved(dev, qpn) &&
3245 !mlx4_vf_smi_enabled(dev, slave, port)) {
3246 /* only enabled VFs may create MLX proxy QPs */
3247 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3248 __func__, slave, port);
3260 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3261 struct mlx4_vhcr *vhcr,
3262 struct mlx4_cmd_mailbox *inbox,
3263 struct mlx4_cmd_mailbox *outbox,
3264 struct mlx4_cmd_info *cmd)
3266 struct mlx4_mtt mtt;
3267 __be64 *page_list = inbox->buf;
3268 u64 *pg_list = (u64 *)page_list;
3270 struct res_mtt *rmtt = NULL;
3271 int start = be64_to_cpu(page_list[0]);
3272 int npages = vhcr->in_modifier;
3275 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3279 /* Call the SW implementation of write_mtt:
3280 * - Prepare a dummy mtt struct
3281 * - Translate inbox contents to simple addresses in host endianness */
3282 	mtt.offset = 0;	/* TBD: offset handling is broken, but we don't
3283 			   handle it since it is never actually used here */
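	/*
	 * Inbox layout, as used by the loop below: page_list[0] holds the
	 * first MTT index to write, page_list[1] is reserved, and the page
	 * addresses start at page_list[2].  Bit 0 of each address is the
	 * hardware "present" flag, which is why it is cleared before the
	 * list is handed to __mlx4_write_mtt() (a reading of this code,
	 * not a spec citation).
	 */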
3286 for (i = 0; i < npages; ++i)
3287 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3289 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3290 ((u64 *)page_list + 2));
3293 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3298 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3299 struct mlx4_vhcr *vhcr,
3300 struct mlx4_cmd_mailbox *inbox,
3301 struct mlx4_cmd_mailbox *outbox,
3302 struct mlx4_cmd_info *cmd)
3304 int eqn = vhcr->in_modifier;
3305 int res_id = eqn | (slave << 10);
3309 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3313 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3317 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3321 atomic_dec(&eq->mtt->ref_count);
3322 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3323 res_end_move(dev, slave, RES_EQ, res_id);
3324 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3329 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3331 res_abort_move(dev, slave, RES_EQ, res_id);
3336 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3338 struct mlx4_priv *priv = mlx4_priv(dev);
3339 struct mlx4_slave_event_eq_info *event_eq;
3340 struct mlx4_cmd_mailbox *mailbox;
3341 u32 in_modifier = 0;
3346 if (!priv->mfunc.master.slave_state)
3349 /* check for slave valid, slave not PF, and slave active */
3350 if (slave < 0 || slave > dev->persist->num_vfs ||
3351 slave == dev->caps.function ||
3352 !priv->mfunc.master.slave_state[slave].active)
3355 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3357 /* Create the event only if the slave is registered */
3358 if (event_eq->eqn < 0)
3361 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3362 res_id = (slave << 10) | event_eq->eqn;
3363 err = get_res(dev, slave, res_id, RES_EQ, &req);
3367 if (req->com.from_state != RES_EQ_HW) {
3372 mailbox = mlx4_alloc_cmd_mailbox(dev);
3373 if (IS_ERR(mailbox)) {
3374 err = PTR_ERR(mailbox);
3378 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3380 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3383 memcpy(mailbox->buf, (u8 *) eqe, 28);
3385 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
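	/*
	 * in_modifier packing for GEN_EQE, as built above: bits 0..7 carry
	 * the slave number and bits 16..25 the slave's EQ number.  Only the
	 * first 28 bytes of the EQE are copied into the mailbox; the
	 * trailing bytes, including the ownership byte, are not.
	 */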
3387 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3388 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3391 put_res(dev, slave, res_id, RES_EQ);
3392 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3393 mlx4_free_cmd_mailbox(dev, mailbox);
3397 put_res(dev, slave, res_id, RES_EQ);
3400 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3404 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3405 struct mlx4_vhcr *vhcr,
3406 struct mlx4_cmd_mailbox *inbox,
3407 struct mlx4_cmd_mailbox *outbox,
3408 struct mlx4_cmd_info *cmd)
3410 int eqn = vhcr->in_modifier;
3411 int res_id = eqn | (slave << 10);
3415 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3419 if (eq->com.from_state != RES_EQ_HW) {
3424 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3427 put_res(dev, slave, res_id, RES_EQ);
3431 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3432 struct mlx4_vhcr *vhcr,
3433 struct mlx4_cmd_mailbox *inbox,
3434 struct mlx4_cmd_mailbox *outbox,
3435 struct mlx4_cmd_info *cmd)
3438 int cqn = vhcr->in_modifier;
3439 struct mlx4_cq_context *cqc = inbox->buf;
3440 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3441 struct res_cq *cq = NULL;
3442 struct res_mtt *mtt;
3444 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3447 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3450 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3453 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3456 atomic_inc(&mtt->ref_count);
3458 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3459 res_end_move(dev, slave, RES_CQ, cqn);
3463 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3465 res_abort_move(dev, slave, RES_CQ, cqn);
3469 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3470 struct mlx4_vhcr *vhcr,
3471 struct mlx4_cmd_mailbox *inbox,
3472 struct mlx4_cmd_mailbox *outbox,
3473 struct mlx4_cmd_info *cmd)
3476 int cqn = vhcr->in_modifier;
3477 struct res_cq *cq = NULL;
3479 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3482 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3485 atomic_dec(&cq->mtt->ref_count);
3486 res_end_move(dev, slave, RES_CQ, cqn);
3490 res_abort_move(dev, slave, RES_CQ, cqn);
3494 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3495 struct mlx4_vhcr *vhcr,
3496 struct mlx4_cmd_mailbox *inbox,
3497 struct mlx4_cmd_mailbox *outbox,
3498 struct mlx4_cmd_info *cmd)
3500 int cqn = vhcr->in_modifier;
3504 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3508 if (cq->com.from_state != RES_CQ_HW)
3511 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3513 put_res(dev, slave, cqn, RES_CQ);
3518 static int handle_resize(struct mlx4_dev *dev, int slave,
3519 struct mlx4_vhcr *vhcr,
3520 struct mlx4_cmd_mailbox *inbox,
3521 struct mlx4_cmd_mailbox *outbox,
3522 struct mlx4_cmd_info *cmd,
3526 struct res_mtt *orig_mtt;
3527 struct res_mtt *mtt;
3528 struct mlx4_cq_context *cqc = inbox->buf;
3529 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3531 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3535 if (orig_mtt != cq->mtt) {
3540 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3544 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3547 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3550 atomic_dec(&orig_mtt->ref_count);
3551 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3552 atomic_inc(&mtt->ref_count);
3554 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3558 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3560 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3566 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3567 struct mlx4_vhcr *vhcr,
3568 struct mlx4_cmd_mailbox *inbox,
3569 struct mlx4_cmd_mailbox *outbox,
3570 struct mlx4_cmd_info *cmd)
3572 int cqn = vhcr->in_modifier;
3576 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3580 if (cq->com.from_state != RES_CQ_HW)
3583 if (vhcr->op_modifier == 0) {
3584 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3588 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3590 put_res(dev, slave, cqn, RES_CQ);
3595 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3597 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3598 int log_rq_stride = srqc->logstride & 7;
3599 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3601 if (log_srq_size + log_rq_stride + 4 < page_shift)
3604 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
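/*
 * Example (illustrative numbers): log_srq_size = 10 with log_rq_stride
 * = 2 describes 1024 WQEs of 1 << (2 + 4) = 64 bytes each, i.e. 64KB
 * of SRQ buffer; with 4KB pages (page_shift = 12) that is
 * 1 << (10 + 2 + 4 - 12) = 16 MTT entries.
 */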
3607 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3608 struct mlx4_vhcr *vhcr,
3609 struct mlx4_cmd_mailbox *inbox,
3610 struct mlx4_cmd_mailbox *outbox,
3611 struct mlx4_cmd_info *cmd)
3614 int srqn = vhcr->in_modifier;
3615 struct res_mtt *mtt;
3616 struct res_srq *srq = NULL;
3617 struct mlx4_srq_context *srqc = inbox->buf;
3618 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3620 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3623 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3626 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3629 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3634 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3638 atomic_inc(&mtt->ref_count);
3640 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3641 res_end_move(dev, slave, RES_SRQ, srqn);
3645 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3647 res_abort_move(dev, slave, RES_SRQ, srqn);
3652 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3653 struct mlx4_vhcr *vhcr,
3654 struct mlx4_cmd_mailbox *inbox,
3655 struct mlx4_cmd_mailbox *outbox,
3656 struct mlx4_cmd_info *cmd)
3659 int srqn = vhcr->in_modifier;
3660 struct res_srq *srq = NULL;
3662 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3665 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3668 atomic_dec(&srq->mtt->ref_count);
3670 atomic_dec(&srq->cq->ref_count);
3671 res_end_move(dev, slave, RES_SRQ, srqn);
3676 res_abort_move(dev, slave, RES_SRQ, srqn);
3681 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3682 struct mlx4_vhcr *vhcr,
3683 struct mlx4_cmd_mailbox *inbox,
3684 struct mlx4_cmd_mailbox *outbox,
3685 struct mlx4_cmd_info *cmd)
3688 int srqn = vhcr->in_modifier;
3689 struct res_srq *srq;
3691 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3694 if (srq->com.from_state != RES_SRQ_HW) {
3698 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3700 put_res(dev, slave, srqn, RES_SRQ);
3704 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3705 struct mlx4_vhcr *vhcr,
3706 struct mlx4_cmd_mailbox *inbox,
3707 struct mlx4_cmd_mailbox *outbox,
3708 struct mlx4_cmd_info *cmd)
3711 int srqn = vhcr->in_modifier;
3712 struct res_srq *srq;
3714 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3718 if (srq->com.from_state != RES_SRQ_HW) {
3723 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3725 put_res(dev, slave, srqn, RES_SRQ);
3729 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3730 struct mlx4_vhcr *vhcr,
3731 struct mlx4_cmd_mailbox *inbox,
3732 struct mlx4_cmd_mailbox *outbox,
3733 struct mlx4_cmd_info *cmd)
3736 int qpn = vhcr->in_modifier & 0x7fffff;
3739 err = get_res(dev, slave, qpn, RES_QP, &qp);
3742 if (qp->com.from_state != RES_QP_HW) {
3747 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3749 put_res(dev, slave, qpn, RES_QP);
3753 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3754 struct mlx4_vhcr *vhcr,
3755 struct mlx4_cmd_mailbox *inbox,
3756 struct mlx4_cmd_mailbox *outbox,
3757 struct mlx4_cmd_info *cmd)
3759 struct mlx4_qp_context *context = inbox->buf + 8;
3760 adjust_proxy_tun_qkey(dev, vhcr, context);
3761 update_pkey_index(dev, slave, inbox);
3762 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
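/*
 * Bit 6 of sched_queue selects the physical port (0 -> port 1,
 * 1 -> port 2).  For a VF whose virtual port is remapped (for example
 * under port bonding), that bit must be rewritten with the real port
 * returned by mlx4_slave_convert_port() before the context reaches the
 * hardware; that is what adjust_qp_sched_queue() below does for both
 * the primary and the alternate path.
 */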
3765 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3766 struct mlx4_qp_context *qpc,
3767 struct mlx4_cmd_mailbox *inbox)
3769 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3771 int port = mlx4_slave_convert_port(
3772 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3777 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3780 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3781 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3782 qpc->pri_path.sched_queue = pri_sched_queue;
3785 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3786 port = mlx4_slave_convert_port(
3787 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3791 qpc->alt_path.sched_queue =
3792 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3798 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3799 struct mlx4_qp_context *qpc,
3800 struct mlx4_cmd_mailbox *inbox)
3804 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3805 u8 sched = *(u8 *)(inbox->buf + 64);
3808 port = (sched >> 6 & 1) + 1;
3809 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3810 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3811 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3817 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3818 struct mlx4_vhcr *vhcr,
3819 struct mlx4_cmd_mailbox *inbox,
3820 struct mlx4_cmd_mailbox *outbox,
3821 struct mlx4_cmd_info *cmd)
3824 struct mlx4_qp_context *qpc = inbox->buf + 8;
3825 int qpn = vhcr->in_modifier & 0x7fffff;
3827 u8 orig_sched_queue;
3828 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3829 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3830 u8 orig_pri_path_fl = qpc->pri_path.fl;
3831 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3832 u8 orig_feup = qpc->pri_path.feup;
3834 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3837 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3841 if (roce_verify_mac(dev, slave, qpc, inbox))
3844 update_pkey_index(dev, slave, inbox);
3845 update_gid(dev, inbox, (u8)slave);
3846 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3847 orig_sched_queue = qpc->pri_path.sched_queue;
3849 err = get_res(dev, slave, qpn, RES_QP, &qp);
3852 if (qp->com.from_state != RES_QP_HW) {
3857 err = update_vport_qp_param(dev, inbox, slave, qpn);
3861 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3863 /* if no error, save sched queue value passed in by VF. This is
3864 * essentially the QOS value provided by the VF. This will be useful
3865 * if we allow dynamic changes from VST back to VGT
3868 qp->sched_queue = orig_sched_queue;
3869 qp->vlan_control = orig_vlan_control;
3870 qp->fvl_rx = orig_fvl_rx;
3871 qp->pri_path_fl = orig_pri_path_fl;
3872 qp->vlan_index = orig_vlan_index;
3873 qp->feup = orig_feup;
3875 put_res(dev, slave, qpn, RES_QP);
3879 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3880 struct mlx4_vhcr *vhcr,
3881 struct mlx4_cmd_mailbox *inbox,
3882 struct mlx4_cmd_mailbox *outbox,
3883 struct mlx4_cmd_info *cmd)
3886 struct mlx4_qp_context *context = inbox->buf + 8;
3888 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3891 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3895 update_pkey_index(dev, slave, inbox);
3896 update_gid(dev, inbox, (u8)slave);
3897 adjust_proxy_tun_qkey(dev, vhcr, context);
3898 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3901 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3902 struct mlx4_vhcr *vhcr,
3903 struct mlx4_cmd_mailbox *inbox,
3904 struct mlx4_cmd_mailbox *outbox,
3905 struct mlx4_cmd_info *cmd)
3908 struct mlx4_qp_context *context = inbox->buf + 8;
3910 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3913 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3917 update_pkey_index(dev, slave, inbox);
3918 update_gid(dev, inbox, (u8)slave);
3919 adjust_proxy_tun_qkey(dev, vhcr, context);
3920 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3924 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3925 struct mlx4_vhcr *vhcr,
3926 struct mlx4_cmd_mailbox *inbox,
3927 struct mlx4_cmd_mailbox *outbox,
3928 struct mlx4_cmd_info *cmd)
3930 struct mlx4_qp_context *context = inbox->buf + 8;
3931 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3934 adjust_proxy_tun_qkey(dev, vhcr, context);
3935 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3938 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3939 struct mlx4_vhcr *vhcr,
3940 struct mlx4_cmd_mailbox *inbox,
3941 struct mlx4_cmd_mailbox *outbox,
3942 struct mlx4_cmd_info *cmd)
3945 struct mlx4_qp_context *context = inbox->buf + 8;
3947 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3950 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3954 adjust_proxy_tun_qkey(dev, vhcr, context);
3955 update_gid(dev, inbox, (u8)slave);
3956 update_pkey_index(dev, slave, inbox);
3957 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3960 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3961 struct mlx4_vhcr *vhcr,
3962 struct mlx4_cmd_mailbox *inbox,
3963 struct mlx4_cmd_mailbox *outbox,
3964 struct mlx4_cmd_info *cmd)
3967 struct mlx4_qp_context *context = inbox->buf + 8;
3969 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3972 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3976 adjust_proxy_tun_qkey(dev, vhcr, context);
3977 update_gid(dev, inbox, (u8)slave);
3978 update_pkey_index(dev, slave, inbox);
3979 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3982 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3983 struct mlx4_vhcr *vhcr,
3984 struct mlx4_cmd_mailbox *inbox,
3985 struct mlx4_cmd_mailbox *outbox,
3986 struct mlx4_cmd_info *cmd)
3989 int qpn = vhcr->in_modifier & 0x7fffff;
3992 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3995 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3999 atomic_dec(&qp->mtt->ref_count);
4000 atomic_dec(&qp->rcq->ref_count);
4001 atomic_dec(&qp->scq->ref_count);
4003 atomic_dec(&qp->srq->ref_count);
4004 res_end_move(dev, slave, RES_QP, qpn);
4008 res_abort_move(dev, slave, RES_QP, qpn);
4013 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4014 struct res_qp *rqp, u8 *gid)
4016 struct res_gid *res;
4018 list_for_each_entry(res, &rqp->mcg_list, list) {
4019 if (!memcmp(res->gid, gid, 16))
4025 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4026 u8 *gid, enum mlx4_protocol prot,
4027 enum mlx4_steer_type steer, u64 reg_id)
4029 struct res_gid *res;
4032 res = kzalloc(sizeof(*res), GFP_KERNEL);
4036 spin_lock_irq(&rqp->mcg_spl);
4037 if (find_gid(dev, slave, rqp, gid)) {
4041 memcpy(res->gid, gid, 16);
4044 res->reg_id = reg_id;
4045 list_add_tail(&res->list, &rqp->mcg_list);
4048 spin_unlock_irq(&rqp->mcg_spl);
4053 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4054 u8 *gid, enum mlx4_protocol prot,
4055 enum mlx4_steer_type steer, u64 *reg_id)
4057 struct res_gid *res;
4060 spin_lock_irq(&rqp->mcg_spl);
4061 res = find_gid(dev, slave, rqp, gid);
4062 if (!res || res->prot != prot || res->steer != steer)
4065 *reg_id = res->reg_id;
4066 list_del(&res->list);
4070 spin_unlock_irq(&rqp->mcg_spl);
4075 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4076 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4077 enum mlx4_steer_type type, u64 *reg_id)
4079 switch (dev->caps.steering_mode) {
4080 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4081 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4084 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4085 block_loopback, prot,
4088 case MLX4_STEERING_MODE_B0:
4089 if (prot == MLX4_PROT_ETH) {
4090 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4095 return mlx4_qp_attach_common(dev, qp, gid,
4096 block_loopback, prot, type);
4102 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4103 u8 gid[16], enum mlx4_protocol prot,
4104 enum mlx4_steer_type type, u64 reg_id)
4106 switch (dev->caps.steering_mode) {
4107 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4108 return mlx4_flow_detach(dev, reg_id);
4109 case MLX4_STEERING_MODE_B0:
4110 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4116 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4117 u8 *gid, enum mlx4_protocol prot)
4121 if (prot != MLX4_PROT_ETH)
4124 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4125 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4126 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4135 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4136 struct mlx4_vhcr *vhcr,
4137 struct mlx4_cmd_mailbox *inbox,
4138 struct mlx4_cmd_mailbox *outbox,
4139 struct mlx4_cmd_info *cmd)
4141 struct mlx4_qp qp; /* dummy for calling attach/detach */
4142 u8 *gid = inbox->buf;
4143 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4148 int attach = vhcr->op_modifier;
4149 int block_loopback = vhcr->in_modifier >> 31;
4150 u8 steer_type_mask = 2;
4151 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
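	/*
	 * gid[7] carries control bits here rather than GID payload: bit 1,
	 * extracted with steer_type_mask above, encodes the mlx4_steer_type
	 * the caller requested (a reading of this wrapper's calling
	 * convention).
	 */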
4153 qpn = vhcr->in_modifier & 0xffffff;
4154 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4160 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4163 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4166 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4170 err = mlx4_adjust_port(dev, slave, gid, prot);
4174 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4178 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4180 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4183 put_res(dev, slave, qpn, RES_QP);
4187 qp_detach(dev, &qp, gid, prot, type, reg_id);
4189 put_res(dev, slave, qpn, RES_QP);
4194 * MAC validation for Flow Steering rules.
4195  * A VF may attach rules only with a MAC address that is assigned to it.
4197 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4198 struct list_head *rlist)
4200 struct mac_res *res, *tmp;
4203 	/* make sure it isn't a multicast or broadcast MAC */
4204 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4205 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4206 list_for_each_entry_safe(res, tmp, rlist, list) {
4207 be_mac = cpu_to_be64(res->mac << 16);
4208 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4211 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4212 eth_header->eth.dst_mac, slave);
4219  * If the eth header is missing, prepend an eth header carrying a MAC
4220  * address assigned to the VF.
4222 static int add_eth_header(struct mlx4_dev *dev, int slave,
4223 struct mlx4_cmd_mailbox *inbox,
4224 struct list_head *rlist, int header_id)
4226 struct mac_res *res, *tmp;
4228 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4229 struct mlx4_net_trans_rule_hw_eth *eth_header;
4230 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4231 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4233 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4235 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4237 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4239 /* Clear a space in the inbox for eth header */
4240 switch (header_id) {
4241 case MLX4_NET_TRANS_RULE_ID_IPV4:
4243 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4244 memmove(ip_header, eth_header,
4245 sizeof(*ip_header) + sizeof(*l4_header));
4247 case MLX4_NET_TRANS_RULE_ID_TCP:
4248 case MLX4_NET_TRANS_RULE_ID_UDP:
4249 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4251 memmove(l4_header, eth_header, sizeof(*l4_header));
4256 list_for_each_entry_safe(res, tmp, rlist, list) {
4257 if (port == res->port) {
4258 be_mac = cpu_to_be64(res->mac << 16);
4263 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4268 memset(eth_header, 0, sizeof(*eth_header));
4269 eth_header->size = sizeof(*eth_header) >> 2;
4270 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4271 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4272 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4278 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4279 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4280 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4281 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4282 struct mlx4_vhcr *vhcr,
4283 struct mlx4_cmd_mailbox *inbox,
4284 struct mlx4_cmd_mailbox *outbox,
4285 struct mlx4_cmd_info *cmd_info)
4288 u32 qpn = vhcr->in_modifier & 0xffffff;
4292 u64 pri_addr_path_mask;
4293 struct mlx4_update_qp_context *cmd;
4296 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4298 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4299 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4300 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4303 if ((pri_addr_path_mask &
4304 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4305 !(dev->caps.flags2 &
4306 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4307 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4312 /* Just change the smac for the QP */
4313 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4315 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4319 port = (rqp->sched_queue >> 6 & 1) + 1;
4321 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4322 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4323 err = mac_find_smac_ix_in_slave(dev, slave, port,
4327 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4333 err = mlx4_cmd(dev, inbox->dma,
4334 vhcr->in_modifier, 0,
4335 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4338 		mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4343 put_res(dev, slave, qpn, RES_QP);
4347 static u32 qp_attach_mbox_size(void *mbox)
4349 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4350 struct _rule_hw *rule_header;
4352 rule_header = (struct _rule_hw *)(mbox + size);
4354 while (rule_header->size) {
4355 size += rule_header->size * sizeof(u32);
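		/*
		 * Each rule header stores its own size in dwords, so the
		 * total mailbox size is the control segment plus the sum of
		 * all chained headers; the walk ends at the zero-sized
		 * terminator.
		 */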
4361 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4363 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4364 struct mlx4_vhcr *vhcr,
4365 struct mlx4_cmd_mailbox *inbox,
4366 struct mlx4_cmd_mailbox *outbox,
4367 struct mlx4_cmd_info *cmd)
4370 struct mlx4_priv *priv = mlx4_priv(dev);
4371 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4372 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4376 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4377 struct _rule_hw *rule_header;
4379 struct res_fs_rule *rrule;
4382 if (dev->caps.steering_mode !=
4383 MLX4_STEERING_MODE_DEVICE_MANAGED)
4386 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4387 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4391 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4392 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4394 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4397 rule_header = (struct _rule_hw *)(ctrl + 1);
4398 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4400 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4401 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
4403 switch (header_id) {
4404 case MLX4_NET_TRANS_RULE_ID_ETH:
4405 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4410 case MLX4_NET_TRANS_RULE_ID_IB:
4412 case MLX4_NET_TRANS_RULE_ID_IPV4:
4413 case MLX4_NET_TRANS_RULE_ID_TCP:
4414 case MLX4_NET_TRANS_RULE_ID_UDP:
4415 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4416 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4420 vhcr->in_modifier +=
4421 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4424 pr_err("Corrupted mailbox\n");
4429 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4430 vhcr->in_modifier, 0,
4431 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4437 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4439 		mlx4_err(dev, "Failed to add flow steering resources\n");
4443 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4447 mbox_size = qp_attach_mbox_size(inbox->buf);
4448 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4449 if (!rrule->mirr_mbox) {
4453 rrule->mirr_mbox_size = mbox_size;
4454 rrule->mirr_rule_id = 0;
4455 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4457 /* set different port */
4458 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4459 if (ctrl->port == 1)
4464 if (mlx4_is_bonded(dev))
4465 mlx4_do_mirror_rule(dev, rrule);
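	/*
	 * When the two physical ports are bonded (HA mode), every VF rule
	 * is duplicated on the other port using the mirr_mbox copy saved
	 * above (with the port field flipped), so traffic keeps flowing
	 * after a port failover.
	 */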
4467 atomic_inc(&rqp->ref_count);
4470 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4472 /* detach rule on error */
4474 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4475 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4478 put_res(dev, slave, qpn, RES_QP);
4482 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4486 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4488 		mlx4_err(dev, "Failed to remove flow steering resources\n");
4492 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4493 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4497 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4498 struct mlx4_vhcr *vhcr,
4499 struct mlx4_cmd_mailbox *inbox,
4500 struct mlx4_cmd_mailbox *outbox,
4501 struct mlx4_cmd_info *cmd)
4505 struct res_fs_rule *rrule;
4509 if (dev->caps.steering_mode !=
4510 MLX4_STEERING_MODE_DEVICE_MANAGED)
4513 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4517 if (!rrule->mirr_mbox) {
4518 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4519 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4522 mirr_reg_id = rrule->mirr_rule_id;
4523 kfree(rrule->mirr_mbox);
4526 	/* Release the rule from busy state before removal */
4527 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4528 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4532 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4533 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4535 			mlx4_err(dev, "Failed to get resource of mirror rule\n");
4537 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4538 mlx4_undo_mirror_rule(dev, rrule);
4541 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4543 		mlx4_err(dev, "Failed to remove flow steering resources\n");
4547 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4548 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4551 atomic_dec(&rqp->ref_count);
4553 put_res(dev, slave, qpn, RES_QP);
4558 BUSY_MAX_RETRIES = 10
4561 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4562 struct mlx4_vhcr *vhcr,
4563 struct mlx4_cmd_mailbox *inbox,
4564 struct mlx4_cmd_mailbox *outbox,
4565 struct mlx4_cmd_info *cmd)
4568 int index = vhcr->in_modifier & 0xffff;
4570 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4574 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4575 put_res(dev, slave, index, RES_COUNTER);
4579 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4581 struct res_gid *rgid;
4582 struct res_gid *tmp;
4583 struct mlx4_qp qp; /* dummy for calling attach/detach */
4585 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4586 switch (dev->caps.steering_mode) {
4587 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4588 mlx4_flow_detach(dev, rgid->reg_id);
4590 case MLX4_STEERING_MODE_B0:
4591 qp.qpn = rqp->local_qpn;
4592 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4593 rgid->prot, rgid->steer);
4596 list_del(&rgid->list);
4601 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4602 enum mlx4_resource type, int print)
4604 struct mlx4_priv *priv = mlx4_priv(dev);
4605 struct mlx4_resource_tracker *tracker =
4606 &priv->mfunc.master.res_tracker;
4607 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4608 struct res_common *r;
4609 struct res_common *tmp;
4613 spin_lock_irq(mlx4_tlock(dev));
4614 list_for_each_entry_safe(r, tmp, rlist, list) {
4615 if (r->owner == slave) {
4617 if (r->state == RES_ANY_BUSY) {
4620 "%s id 0x%llx is busy\n",
4625 r->from_state = r->state;
4626 r->state = RES_ANY_BUSY;
4632 spin_unlock_irq(mlx4_tlock(dev));
4637 static int move_all_busy(struct mlx4_dev *dev, int slave,
4638 enum mlx4_resource type)
4640 unsigned long begin;
4645 busy = _move_all_busy(dev, slave, type, 0);
4646 if (time_after(jiffies, begin + 5 * HZ))
4653 busy = _move_all_busy(dev, slave, type, 1);
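/*
 * move_all_busy() retries for up to 5 seconds, giving in-flight command
 * wrappers a chance to drop their busy references; the final pass (with
 * print = 1) logs whatever is still busy before the caller proceeds to
 * tear the resources down anyway.
 */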
4657 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4659 struct mlx4_priv *priv = mlx4_priv(dev);
4660 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4661 struct list_head *qp_list =
4662 &tracker->slave_list[slave].res_list[RES_QP];
4670 err = move_all_busy(dev, slave, RES_QP);
4672 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4675 spin_lock_irq(mlx4_tlock(dev));
4676 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4677 spin_unlock_irq(mlx4_tlock(dev));
4678 if (qp->com.owner == slave) {
4679 qpn = qp->com.res_id;
4680 detach_qp(dev, slave, qp);
4681 state = qp->com.from_state;
4682 while (state != 0) {
4684 case RES_QP_RESERVED:
4685 spin_lock_irq(mlx4_tlock(dev));
4686 rb_erase(&qp->com.node,
4687 &tracker->res_tree[RES_QP]);
4688 list_del(&qp->com.list);
4689 spin_unlock_irq(mlx4_tlock(dev));
4690 if (!valid_reserved(dev, slave, qpn)) {
4691 __mlx4_qp_release_range(dev, qpn, 1);
4692 mlx4_release_resource(dev, slave,
4699 if (!valid_reserved(dev, slave, qpn))
4700 __mlx4_qp_free_icm(dev, qpn);
4701 state = RES_QP_RESERVED;
4705 err = mlx4_cmd(dev, in_param,
4708 MLX4_CMD_TIME_CLASS_A,
4711 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4712 slave, qp->local_qpn);
4713 atomic_dec(&qp->rcq->ref_count);
4714 atomic_dec(&qp->scq->ref_count);
4715 atomic_dec(&qp->mtt->ref_count);
4717 atomic_dec(&qp->srq->ref_count);
4718 state = RES_QP_MAPPED;
4725 spin_lock_irq(mlx4_tlock(dev));
4727 spin_unlock_irq(mlx4_tlock(dev));
4730 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4732 struct mlx4_priv *priv = mlx4_priv(dev);
4733 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4734 struct list_head *srq_list =
4735 &tracker->slave_list[slave].res_list[RES_SRQ];
4736 struct res_srq *srq;
4737 struct res_srq *tmp;
4744 err = move_all_busy(dev, slave, RES_SRQ);
4746 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4749 spin_lock_irq(mlx4_tlock(dev));
4750 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4751 spin_unlock_irq(mlx4_tlock(dev));
4752 if (srq->com.owner == slave) {
4753 srqn = srq->com.res_id;
4754 state = srq->com.from_state;
4755 while (state != 0) {
4757 case RES_SRQ_ALLOCATED:
4758 __mlx4_srq_free_icm(dev, srqn);
4759 spin_lock_irq(mlx4_tlock(dev));
4760 rb_erase(&srq->com.node,
4761 &tracker->res_tree[RES_SRQ]);
4762 list_del(&srq->com.list);
4763 spin_unlock_irq(mlx4_tlock(dev));
4764 mlx4_release_resource(dev, slave,
4772 err = mlx4_cmd(dev, in_param, srqn, 1,
4774 MLX4_CMD_TIME_CLASS_A,
4777 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4780 atomic_dec(&srq->mtt->ref_count);
4782 atomic_dec(&srq->cq->ref_count);
4783 state = RES_SRQ_ALLOCATED;
4791 spin_lock_irq(mlx4_tlock(dev));
4793 spin_unlock_irq(mlx4_tlock(dev));
4796 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4798 struct mlx4_priv *priv = mlx4_priv(dev);
4799 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4800 struct list_head *cq_list =
4801 &tracker->slave_list[slave].res_list[RES_CQ];
4810 err = move_all_busy(dev, slave, RES_CQ);
4812 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4815 spin_lock_irq(mlx4_tlock(dev));
4816 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4817 spin_unlock_irq(mlx4_tlock(dev));
4818 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4819 cqn = cq->com.res_id;
4820 state = cq->com.from_state;
4821 while (state != 0) {
4823 case RES_CQ_ALLOCATED:
4824 __mlx4_cq_free_icm(dev, cqn);
4825 spin_lock_irq(mlx4_tlock(dev));
4826 rb_erase(&cq->com.node,
4827 &tracker->res_tree[RES_CQ]);
4828 list_del(&cq->com.list);
4829 spin_unlock_irq(mlx4_tlock(dev));
4830 mlx4_release_resource(dev, slave,
4838 err = mlx4_cmd(dev, in_param, cqn, 1,
4840 MLX4_CMD_TIME_CLASS_A,
4843 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4845 atomic_dec(&cq->mtt->ref_count);
4846 state = RES_CQ_ALLOCATED;
4854 spin_lock_irq(mlx4_tlock(dev));
4856 spin_unlock_irq(mlx4_tlock(dev));
4859 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4861 struct mlx4_priv *priv = mlx4_priv(dev);
4862 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4863 struct list_head *mpt_list =
4864 &tracker->slave_list[slave].res_list[RES_MPT];
4865 struct res_mpt *mpt;
4866 struct res_mpt *tmp;
4873 err = move_all_busy(dev, slave, RES_MPT);
4875 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4878 spin_lock_irq(mlx4_tlock(dev));
4879 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4880 spin_unlock_irq(mlx4_tlock(dev));
4881 if (mpt->com.owner == slave) {
4882 mptn = mpt->com.res_id;
4883 state = mpt->com.from_state;
4884 while (state != 0) {
4886 case RES_MPT_RESERVED:
4887 __mlx4_mpt_release(dev, mpt->key);
4888 spin_lock_irq(mlx4_tlock(dev));
4889 rb_erase(&mpt->com.node,
4890 &tracker->res_tree[RES_MPT]);
4891 list_del(&mpt->com.list);
4892 spin_unlock_irq(mlx4_tlock(dev));
4893 mlx4_release_resource(dev, slave,
4899 case RES_MPT_MAPPED:
4900 __mlx4_mpt_free_icm(dev, mpt->key);
4901 state = RES_MPT_RESERVED;
4906 err = mlx4_cmd(dev, in_param, mptn, 0,
4908 MLX4_CMD_TIME_CLASS_A,
4911 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4914 atomic_dec(&mpt->mtt->ref_count);
4915 state = RES_MPT_MAPPED;
4922 spin_lock_irq(mlx4_tlock(dev));
4924 spin_unlock_irq(mlx4_tlock(dev));
4927 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4929 struct mlx4_priv *priv = mlx4_priv(dev);
4930 struct mlx4_resource_tracker *tracker =
4931 &priv->mfunc.master.res_tracker;
4932 struct list_head *mtt_list =
4933 &tracker->slave_list[slave].res_list[RES_MTT];
4934 struct res_mtt *mtt;
4935 struct res_mtt *tmp;
4941 err = move_all_busy(dev, slave, RES_MTT);
4943 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4946 spin_lock_irq(mlx4_tlock(dev));
4947 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4948 spin_unlock_irq(mlx4_tlock(dev));
4949 if (mtt->com.owner == slave) {
4950 base = mtt->com.res_id;
4951 state = mtt->com.from_state;
4952 while (state != 0) {
4954 case RES_MTT_ALLOCATED:
4955 __mlx4_free_mtt_range(dev, base,
4957 spin_lock_irq(mlx4_tlock(dev));
4958 rb_erase(&mtt->com.node,
4959 &tracker->res_tree[RES_MTT]);
4960 list_del(&mtt->com.list);
4961 spin_unlock_irq(mlx4_tlock(dev));
4962 mlx4_release_resource(dev, slave, RES_MTT,
4963 1 << mtt->order, 0);
4973 spin_lock_irq(mlx4_tlock(dev));
4975 spin_unlock_irq(mlx4_tlock(dev));
4978 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4980 struct mlx4_cmd_mailbox *mailbox;
4982 struct res_fs_rule *mirr_rule;
4985 mailbox = mlx4_alloc_cmd_mailbox(dev);
4986 if (IS_ERR(mailbox))
4987 return PTR_ERR(mailbox);
4989 if (!fs_rule->mirr_mbox) {
4990 mlx4_err(dev, "rule mirroring mailbox is null\n");
4991 mlx4_free_cmd_mailbox(dev, mailbox);
4994 memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4995 	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4996 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4998 mlx4_free_cmd_mailbox(dev, mailbox);
5003 err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
5007 err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
5011 fs_rule->mirr_rule_id = reg_id;
5012 mirr_rule->mirr_rule_id = 0;
5013 mirr_rule->mirr_mbox_size = 0;
5014 mirr_rule->mirr_mbox = NULL;
5015 put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
5019 rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
5021 mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
5022 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
5027 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
5029 struct mlx4_priv *priv = mlx4_priv(dev);
5030 struct mlx4_resource_tracker *tracker =
5031 &priv->mfunc.master.res_tracker;
5032 struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
5034 struct res_fs_rule *fs_rule;
5036 LIST_HEAD(mirr_list);
5038 for (p = rb_first(root); p; p = rb_next(p)) {
5039 fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5040 if ((bond && fs_rule->mirr_mbox_size) ||
5041 (!bond && !fs_rule->mirr_mbox_size))
5042 list_add_tail(&fs_rule->mirr_list, &mirr_list);
5045 list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5047 err += mlx4_do_mirror_rule(dev, fs_rule);
5049 err += mlx4_undo_mirror_rule(dev, fs_rule);
5054 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5056 return mlx4_mirror_fs_rules(dev, true);
5059 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5061 return mlx4_mirror_fs_rules(dev, false);
5064 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5066 struct mlx4_priv *priv = mlx4_priv(dev);
5067 struct mlx4_resource_tracker *tracker =
5068 &priv->mfunc.master.res_tracker;
5069 struct list_head *fs_rule_list =
5070 &tracker->slave_list[slave].res_list[RES_FS_RULE];
5071 struct res_fs_rule *fs_rule;
5072 struct res_fs_rule *tmp;
5077 err = move_all_busy(dev, slave, RES_FS_RULE);
5079 		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
5082 spin_lock_irq(mlx4_tlock(dev));
5083 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5084 spin_unlock_irq(mlx4_tlock(dev));
5085 if (fs_rule->com.owner == slave) {
5086 base = fs_rule->com.res_id;
5087 state = fs_rule->com.from_state;
5088 while (state != 0) {
5090 case RES_FS_RULE_ALLOCATED:
5092 err = mlx4_cmd(dev, base, 0, 0,
5093 MLX4_QP_FLOW_STEERING_DETACH,
5094 MLX4_CMD_TIME_CLASS_A,
5097 spin_lock_irq(mlx4_tlock(dev));
5098 rb_erase(&fs_rule->com.node,
5099 &tracker->res_tree[RES_FS_RULE]);
5100 list_del(&fs_rule->com.list);
5101 spin_unlock_irq(mlx4_tlock(dev));
5102 kfree(fs_rule->mirr_mbox);
5112 spin_lock_irq(mlx4_tlock(dev));
5114 spin_unlock_irq(mlx4_tlock(dev));
5117 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5119 struct mlx4_priv *priv = mlx4_priv(dev);
5120 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5121 struct list_head *eq_list =
5122 &tracker->slave_list[slave].res_list[RES_EQ];
5130 err = move_all_busy(dev, slave, RES_EQ);
5132 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5135 spin_lock_irq(mlx4_tlock(dev));
5136 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5137 spin_unlock_irq(mlx4_tlock(dev));
5138 if (eq->com.owner == slave) {
5139 eqn = eq->com.res_id;
5140 state = eq->com.from_state;
5141 while (state != 0) {
5143 case RES_EQ_RESERVED:
5144 spin_lock_irq(mlx4_tlock(dev));
5145 rb_erase(&eq->com.node,
5146 &tracker->res_tree[RES_EQ]);
5147 list_del(&eq->com.list);
5148 spin_unlock_irq(mlx4_tlock(dev));
5154 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5155 1, MLX4_CMD_HW2SW_EQ,
5156 MLX4_CMD_TIME_CLASS_A,
5159 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5160 slave, eqn & 0x3ff);
5161 atomic_dec(&eq->mtt->ref_count);
5162 state = RES_EQ_RESERVED;
5170 spin_lock_irq(mlx4_tlock(dev));
5172 spin_unlock_irq(mlx4_tlock(dev));
5175 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5177 struct mlx4_priv *priv = mlx4_priv(dev);
5178 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5179 struct list_head *counter_list =
5180 &tracker->slave_list[slave].res_list[RES_COUNTER];
5181 struct res_counter *counter;
5182 struct res_counter *tmp;
5184 int *counters_arr = NULL;
5187 err = move_all_busy(dev, slave, RES_COUNTER);
5189 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5192 counters_arr = kmalloc_array(dev->caps.max_counters,
5193 sizeof(*counters_arr), GFP_KERNEL);
5200 spin_lock_irq(mlx4_tlock(dev));
5201 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5202 if (counter->com.owner == slave) {
5203 counters_arr[i++] = counter->com.res_id;
5204 rb_erase(&counter->com.node,
5205 &tracker->res_tree[RES_COUNTER]);
5206 list_del(&counter->com.list);
5210 spin_unlock_irq(mlx4_tlock(dev));
5213 __mlx4_counter_free(dev, counters_arr[j++]);
5214 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5218 kfree(counters_arr);
5221 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5223 struct mlx4_priv *priv = mlx4_priv(dev);
5224 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5225 struct list_head *xrcdn_list =
5226 &tracker->slave_list[slave].res_list[RES_XRCD];
5227 struct res_xrcdn *xrcd;
5228 struct res_xrcdn *tmp;
5232 err = move_all_busy(dev, slave, RES_XRCD);
5234 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5237 spin_lock_irq(mlx4_tlock(dev));
5238 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5239 if (xrcd->com.owner == slave) {
5240 xrcdn = xrcd->com.res_id;
5241 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5242 list_del(&xrcd->com.list);
5244 __mlx4_xrcd_free(dev, xrcdn);
5247 spin_unlock_irq(mlx4_tlock(dev));
5250 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5252 struct mlx4_priv *priv = mlx4_priv(dev);
5253 mlx4_reset_roce_gids(dev, slave);
5254 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5255 rem_slave_vlans(dev, slave);
5256 rem_slave_macs(dev, slave);
5257 rem_slave_fs_rule(dev, slave);
5258 rem_slave_qps(dev, slave);
5259 rem_slave_srqs(dev, slave);
5260 rem_slave_cqs(dev, slave);
5261 rem_slave_mrs(dev, slave);
5262 rem_slave_eqs(dev, slave);
5263 rem_slave_mtts(dev, slave);
5264 rem_slave_counters(dev, slave);
5265 rem_slave_xrcdns(dev, slave);
5266 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5269 static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
5270 struct mlx4_vf_immed_vlan_work *work)
5272 ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
5273 ctx->qp_context.qos_vport = work->qos_vport;
5276 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5278 struct mlx4_vf_immed_vlan_work *work =
5279 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5280 struct mlx4_cmd_mailbox *mailbox;
5281 struct mlx4_update_qp_context *upd_context;
5282 struct mlx4_dev *dev = &work->priv->dev;
5283 struct mlx4_resource_tracker *tracker =
5284 &work->priv->mfunc.master.res_tracker;
5285 struct list_head *qp_list =
5286 &tracker->slave_list[work->slave].res_list[RES_QP];
5289 u64 qp_path_mask_vlan_ctrl =
5290 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5291 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5292 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5293 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5294 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5295 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5297 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5298 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5299 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5300 (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5301 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5302 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5303 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5304 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5307 int port, errors = 0;
5310 if (mlx4_is_slave(dev)) {
5311 		mlx4_warn(dev, "Trying to update a QP from within slave %d\n",
5316 mailbox = mlx4_alloc_cmd_mailbox(dev);
5317 if (IS_ERR(mailbox))
5319 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5320 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5321 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5322 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5323 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5324 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5325 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5326 else if (!work->vlan_id)
5327 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5328 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5329 else if (work->vlan_proto == htons(ETH_P_8021AD))
5330 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5331 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5332 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5333 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5334 else /* vst 802.1Q */
5335 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5336 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5337 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5339 upd_context = mailbox->buf;
5340 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5342 spin_lock_irq(mlx4_tlock(dev));
5343 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5344 spin_unlock_irq(mlx4_tlock(dev));
5345 if (qp->com.owner == work->slave) {
5346 if (qp->com.from_state != RES_QP_HW ||
5347 !qp->sched_queue || /* no INIT2RTR trans yet */
5348 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5349 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5350 spin_lock_irq(mlx4_tlock(dev));
5353 port = (qp->sched_queue >> 6 & 1) + 1;
5354 if (port != work->port) {
5355 spin_lock_irq(mlx4_tlock(dev));
5358 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5359 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5361 upd_context->primary_addr_path_mask =
5362 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
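			/*
			 * RC QPs get only qp_path_mask: their vlan_control
			 * bits are left untouched, while all other transport
			 * types also have the TX/RX vlan blocking bits
			 * updated via qp_path_mask_vlan_ctrl.
			 */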
5363 if (work->vlan_id == MLX4_VGT) {
5364 upd_context->qp_context.param3 = qp->param3;
5365 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5366 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5367 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5368 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5369 upd_context->qp_context.pri_path.feup = qp->feup;
5370 upd_context->qp_context.pri_path.sched_queue =
5373 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5374 upd_context->qp_context.pri_path.vlan_control = vlan_control;
5375 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5376 upd_context->qp_context.pri_path.fvl_rx =
5377 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5378 upd_context->qp_context.pri_path.fl =
5379 qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5380 if (work->vlan_proto == htons(ETH_P_8021AD))
5381 upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5383 upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5384 upd_context->qp_context.pri_path.feup =
5385 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5386 upd_context->qp_context.pri_path.sched_queue =
5387 qp->sched_queue & 0xC7;
5388 upd_context->qp_context.pri_path.sched_queue |=
5389 ((work->qos & 0x7) << 3);
5391 if (dev->caps.flags2 &
5392 MLX4_DEV_CAP_FLAG2_QOS_VPP)
5393 update_qos_vpp(upd_context, work);
5396 err = mlx4_cmd(dev, mailbox->dma,
5397 qp->local_qpn & 0xffffff,
5398 0, MLX4_CMD_UPDATE_QP,
5399 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5401 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5402 work->slave, port, qp->local_qpn, err);
5406 spin_lock_irq(mlx4_tlock(dev));
5408 spin_unlock_irq(mlx4_tlock(dev));
5409 mlx4_free_cmd_mailbox(dev, mailbox);
5412 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5413 errors, work->slave, work->port);
5415 	/* unregister the previous vlan_id if needed, provided there were
5416 	 * no errors while updating the QPs
5418 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5419 NO_INDX != work->orig_vlan_ix)
5420 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5421 work->orig_vlan_id);