2 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <rdma/ib_mad.h>
34 #include <rdma/ib_smi.h>
35 #include <rdma/ib_sa.h>
36 #include <rdma/ib_cache.h>
38 #include <linux/random.h>
39 #include <linux/mlx4/cmd.h>
40 #include <linux/gfp.h>
41 #include <rdma/ib_pma.h>
45 #include <linux/mlx4/driver.h>
49 MLX4_IB_VENDOR_CLASS1 = 0x9,
50 MLX4_IB_VENDOR_CLASS2 = 0xa
53 #define MLX4_TUN_SEND_WRID_SHIFT 34
54 #define MLX4_TUN_QPN_SHIFT 32
55 #define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
56 #define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
58 #define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
59 #define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
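/*
 * A sketch of the wr_id layout implied by the shifts above: bits [31:0]
 * carry the ring buffer index, bits [33:32] carry the proxy QP type
 * (IB_QPT_SMI = 0, IB_QPT_GSI = 1), and bit 34 flags a receive completion.
 * For example, a receive posted on the GSI proxy QP with buffer index 5
 * would use:
 *
 *	(u64) 5 | MLX4_TUN_SET_WRID_QPN(IB_QPT_GSI) | MLX4_TUN_WRID_RECV
 */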
61 /* Port mgmt change event handling */
63 #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
64 #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
65 #define NUM_IDX_IN_PKEY_TBL_BLK 32
66 #define GUID_TBL_ENTRY_SIZE 8 /* size in bytes */
67 #define GUID_TBL_BLK_NUM_ENTRIES 8
68 #define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
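/*
 * From the constants above: each GUID table block spans
 * GUID_TBL_BLK_NUM_ENTRIES (8) entries of GUID_TBL_ENTRY_SIZE (8) bytes,
 * i.e. GUID_TBL_BLK_SIZE = 64 bytes, while one P_Key table block in an SMP
 * covers NUM_IDX_IN_PKEY_TBL_BLK (32) P_Key indices.
 */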
70 struct mlx4_mad_rcv_buf {
75 struct mlx4_mad_snd_buf {
79 struct mlx4_tunnel_mad {
81 struct mlx4_ib_tunnel_header hdr;
85 struct mlx4_rcv_tunnel_mad {
86 struct mlx4_rcv_tunnel_hdr hdr;
91 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
92 static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
93 static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
94 int block, u32 change_bitmap);
96 __be64 mlx4_ib_gen_node_guid(void)
98 #define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
99 return cpu_to_be64(NODE_GUID_HI | prandom_u32());
102 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
104 return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
105 cpu_to_be64(0xff00000000000000LL);
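/*
 * Note: the 0xff placed in the TID's most significant byte above tags MADs
 * generated by this demux context itself.  Elsewhere in this file that same
 * byte is used to carry a slave id, and 255 is treated as "dom0" (see the
 * tid remapping in mlx4_ib_demux_mad()).
 */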
108 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
109 int port, const struct ib_wc *in_wc,
110 const struct ib_grh *in_grh,
111 const void *in_mad, void *response_mad)
113 struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
116 u32 in_modifier = port;
119 inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
120 if (IS_ERR(inmailbox))
121 return PTR_ERR(inmailbox);
122 inbox = inmailbox->buf;
124 outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
125 if (IS_ERR(outmailbox)) {
126 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
127 return PTR_ERR(outmailbox);
130 memcpy(inbox, in_mad, 256);
133 * Key check traps can't be generated unless we have in_wc to
134 * tell us where to send the trap.
136 if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
138 if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
140 if (mlx4_is_mfunc(dev->dev) &&
141 (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
157 memset(inbox + 256, 0, 256);
158 ext_info = inbox + 256;
160 ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
161 ext_info->rqpn = cpu_to_be32(in_wc->src_qp);
162 ext_info->sl = in_wc->sl << 4;
163 ext_info->g_path = in_wc->dlid_path_bits |
164 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
165 ext_info->pkey = cpu_to_be16(in_wc->pkey_index);
168 memcpy(ext_info->grh, in_grh, 40);
172 in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
175 err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
176 mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
177 MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
178 (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
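/*
 * Note on the call above: bit 0x8 of op_modifier appears to be used only
 * internally here to request a native (non-wrapped) command with network
 * view; it selects MLX4_CMD_NATIVE and, on the master, is masked out of the
 * op_modifier actually passed to the firmware.
 */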
181 memcpy(response_mad, outmailbox->buf, 256);
183 mlx4_free_cmd_mailbox(dev->dev, inmailbox);
184 mlx4_free_cmd_mailbox(dev->dev, outmailbox);
189 static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
191 struct ib_ah *new_ah;
192 struct rdma_ah_attr ah_attr;
195 if (!dev->send_agent[port_num - 1][0])
198 memset(&ah_attr, 0, sizeof ah_attr);
199 ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
200 rdma_ah_set_dlid(&ah_attr, lid);
201 rdma_ah_set_sl(&ah_attr, sl);
202 rdma_ah_set_port_num(&ah_attr, port_num);
204 new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
209 spin_lock_irqsave(&dev->sm_lock, flags);
210 if (dev->sm_ah[port_num - 1])
211 rdma_destroy_ah(dev->sm_ah[port_num - 1]);
212 dev->sm_ah[port_num - 1] = new_ah;
213 spin_unlock_irqrestore(&dev->sm_lock, flags);
217 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
218 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
220 static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
223 struct ib_port_info *pinfo;
226 u32 bn, pkey_change_bitmap;
230 struct mlx4_ib_dev *dev = to_mdev(ibdev);
231 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
232 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
233 mad->mad_hdr.method == IB_MGMT_METHOD_SET)
234 switch (mad->mad_hdr.attr_id) {
235 case IB_SMP_ATTR_PORT_INFO:
236 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
238 pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
239 lid = be16_to_cpu(pinfo->lid);
241 update_sm_ah(dev, port_num,
242 be16_to_cpu(pinfo->sm_lid),
243 pinfo->neighbormtu_mastersmsl & 0xf);
245 if (pinfo->clientrereg_resv_subnetto & 0x80)
246 handle_client_rereg_event(dev, port_num);
249 handle_lid_change_event(dev, port_num);
252 case IB_SMP_ATTR_PKEY_TABLE:
253 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
255 if (!mlx4_is_mfunc(dev->dev)) {
256 mlx4_ib_dispatch_event(dev, port_num,
257 IB_EVENT_PKEY_CHANGE);
261 /* at this point, we are running in the master.
262 * Slaves do not receive SMPs.
264 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
265 base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
266 pkey_change_bitmap = 0;
267 for (i = 0; i < 32; i++) {
268 pr_debug("PKEY[%d] = 0x%x\n",
269 i + bn*32, be16_to_cpu(base[i]));
270 if (be16_to_cpu(base[i]) !=
271 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
272 pkey_change_bitmap |= (1 << i);
273 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
274 be16_to_cpu(base[i]);
277 pr_debug("PKEY Change event: port=%d, "
278 "block=0x%x, change_bitmap=0x%x\n",
279 port_num, bn, pkey_change_bitmap);
281 if (pkey_change_bitmap) {
282 mlx4_ib_dispatch_event(dev, port_num,
283 IB_EVENT_PKEY_CHANGE);
284 if (!dev->sriov.is_going_down)
285 __propagate_pkey_ev(dev, port_num, bn,
290 case IB_SMP_ATTR_GUID_INFO:
291 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
293 /* paravirtualized master's guid is guid 0 -- does not change */
294 if (!mlx4_is_master(dev->dev))
295 mlx4_ib_dispatch_event(dev, port_num,
296 IB_EVENT_GID_CHANGE);
297 /*if master, notify relevant slaves*/
298 if (mlx4_is_master(dev->dev) &&
299 !dev->sriov.is_going_down) {
300 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
301 mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
302 (u8 *)(&((struct ib_smp *)mad)->data));
303 mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
304 (u8 *)(&((struct ib_smp *)mad)->data));
308 case IB_SMP_ATTR_SL_TO_VL_TABLE:
309 /* cache sl to vl mapping changes for use in
310 * filling QP1 LRH VL field when sending packets
312 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
313 dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
315 if (!mlx4_is_slave(dev->dev)) {
316 union sl2vl_tbl_to_u64 sl2vl64;
319 for (jj = 0; jj < 8; jj++) {
320 sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
321 pr_debug("port %u, sl2vl[%d] = %02x\n",
322 port_num, jj, sl2vl64.sl8[jj]);
324 atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
333 static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
334 int block, u32 change_bitmap)
336 int i, ix, slave, err;
339 for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
340 if (slave == mlx4_master_func_num(dev->dev))
342 if (!mlx4_is_slave_active(dev->dev, slave))
346 for (i = 0; i < 32; i++) {
347 if (!(change_bitmap & (1 << i)))
350 ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
351 if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
352 [ix] == i + 32 * block) {
353 err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
354 pr_debug("propagate_pkey_ev: slave %d,"
355 " port %d, ix %d (%d)\n",
356 slave, port_num, ix, err);
367 static void node_desc_override(struct ib_device *dev,
372 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
373 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
374 mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
375 mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
376 spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
377 memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
378 IB_DEVICE_NODE_DESC_MAX);
379 spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
383 static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
385 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
386 struct ib_mad_send_buf *send_buf;
387 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
392 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
393 IB_MGMT_MAD_DATA, GFP_ATOMIC,
394 IB_MGMT_BASE_VERSION);
395 if (IS_ERR(send_buf))
398 * We rely here on the fact that MLX QPs don't use the
399 * address handle after the send is posted (strictly
400 * speaking this violates the IB spec, but we know
401 * it's OK for our devices).
403 spin_lock_irqsave(&dev->sm_lock, flags);
404 memcpy(send_buf->mad, mad, sizeof *mad);
405 if ((send_buf->ah = dev->sm_ah[port_num - 1]))
406 ret = ib_post_send_mad(send_buf, NULL);
409 spin_unlock_irqrestore(&dev->sm_lock, flags);
412 ib_free_send_mad(send_buf);
416 static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
417 struct ib_sa_mad *sa_mad)
421 /* dispatch to different sa handlers */
422 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
423 case IB_SA_ATTR_MC_MEMBER_REC:
424 ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
432 int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
434 struct mlx4_ib_dev *dev = to_mdev(ibdev);
437 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
438 if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
445 static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
446 u8 port, u16 pkey, u16 *ix)
449 u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
452 if (slave == mlx4_master_func_num(dev->dev))
453 return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
455 unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;
457 for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
458 if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
461 pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];
463 ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
466 if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
467 if (slot_pkey & 0x8000) {
471 /* take first partial pkey index found */
472 if (partial_ix == 0xFF)
473 partial_ix = pkey_ix;
478 if (partial_ix < 0xFF) {
479 *ix = (u16) partial_ix;
486 static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
489 int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
490 enum rdma_network_type net_type;
493 net_type = RDMA_NETWORK_IPV4;
494 else if (version == 6)
495 net_type = RDMA_NETWORK_IPV6;
499 return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
503 int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
504 enum ib_qp_type dest_qpt, struct ib_wc *wc,
505 struct ib_grh *grh, struct ib_mad *mad)
509 struct ib_send_wr *bad_wr;
510 struct mlx4_ib_demux_pv_ctx *tun_ctx;
511 struct mlx4_ib_demux_pv_qp *tun_qp;
512 struct mlx4_rcv_tunnel_mad *tun_mad;
513 struct rdma_ah_attr attr;
515 struct ib_qp *src_qp = NULL;
516 unsigned tun_tx_ix = 0;
521 u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
523 if (dest_qpt > IB_QPT_GSI)
526 tun_ctx = dev->sriov.demux[port-1].tun[slave];
528 /* check if proxy qp created */
529 if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
533 tun_qp = &tun_ctx->qp[0];
535 tun_qp = &tun_ctx->qp[1];
537 /* compute P_Key index to put in tunnel header for slave */
540 ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
544 ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
547 tun_pkey_ix = pkey_ix;
549 tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
551 dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
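/*
 * A rough sketch of the numbering assumed above: each slave owns a block of
 * 8 special/proxy QPs starting at phys_caps.base_proxy_sqpn + 8 * slave; the
 * destination QP for a given port and QP type sits at offset
 * port + 2 * dest_qpt - 1 within that block (e.g. port 1/IB_QPT_SMI ->
 * offset 0, port 1/IB_QPT_GSI -> offset 2), which is consistent with
 * is_proxy_qp0() below treating offsets 0-1 as the QP0 proxies.
 */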
553 /* get tunnel tx data buf for slave */
556 /* Create an ah. We just need an empty one with the port num for the post_send.
557 * The driver will set the force-loopback bit in post_send. */
558 memset(&attr, 0, sizeof attr);
559 attr.type = rdma_ah_find_type(&dev->ib_dev, port);
561 rdma_ah_set_port_num(&attr, port);
566 if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
568 rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
570 ah = rdma_create_ah(tun_ctx->pd, &attr);
574 /* Allocate a tunnel tx buf slot only after the failure-prone steps above have passed. */
575 spin_lock(&tun_qp->tx_lock);
576 if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
577 (MLX4_NUM_TUNNEL_BUFS - 1))
580 tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
581 spin_unlock(&tun_qp->tx_lock);
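/*
 * The tx ring is managed as a simple head/tail pair: it is considered full
 * when head - tail reaches MLX4_NUM_TUNNEL_BUFS - 1, and the slot index is
 * the head masked by MLX4_NUM_TUNNEL_BUFS - 1 (which assumes
 * MLX4_NUM_TUNNEL_BUFS is a power of two).
 */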
585 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
586 if (tun_qp->tx_ring[tun_tx_ix].ah)
587 rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
588 tun_qp->tx_ring[tun_tx_ix].ah = ah;
589 ib_dma_sync_single_for_cpu(&dev->ib_dev,
590 tun_qp->tx_ring[tun_tx_ix].buf.map,
591 sizeof (struct mlx4_rcv_tunnel_mad),
594 /* copy over to tunnel buffer */
596 memcpy(&tun_mad->grh, grh, sizeof *grh);
597 memcpy(&tun_mad->mad, mad, sizeof *mad);
599 /* adjust tunnel data */
600 tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
601 tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
602 tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
606 if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
609 if (vlan != wc->vlan_id)
610 /* Packet vlan is not the VST-assigned vlan.
615 /* Remove the vlan tag before forwarding
616 * the packet to the VF.
623 tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
624 memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
625 memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
627 tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
628 tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid);
631 ib_dma_sync_single_for_device(&dev->ib_dev,
632 tun_qp->tx_ring[tun_tx_ix].buf.map,
633 sizeof (struct mlx4_rcv_tunnel_mad),
636 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
637 list.length = sizeof (struct mlx4_rcv_tunnel_mad);
638 list.lkey = tun_ctx->pd->local_dma_lkey;
642 wr.remote_qkey = IB_QP_SET_QKEY;
643 wr.remote_qpn = dqpn;
645 wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
646 wr.wr.sg_list = &list;
648 wr.wr.opcode = IB_WR_SEND;
649 wr.wr.send_flags = IB_SEND_SIGNALED;
651 ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
655 spin_lock(&tun_qp->tx_lock);
656 tun_qp->tx_ix_tail++;
657 spin_unlock(&tun_qp->tx_lock);
658 tun_qp->tx_ring[tun_tx_ix].ah = NULL;
664 static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
665 struct ib_wc *wc, struct ib_grh *grh,
668 struct mlx4_ib_dev *dev = to_mdev(ibdev);
674 if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
683 if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
685 if (!(wc->wc_flags & IB_WC_GRH)) {
686 mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
689 if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
690 mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
693 err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
694 if (err && mlx4_is_mf_bonded(dev->dev)) {
695 other_port = (port == 1) ? 2 : 1;
696 err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
699 pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
700 slave, grh->dgid.raw, port, other_port);
704 mlx4_ib_warn(ibdev, "failed matching grh\n");
707 if (slave >= dev->dev->caps.sqp_demux) {
708 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
709 slave, dev->dev->caps.sqp_demux);
713 if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
716 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
718 pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
723 /* Initially assume that this mad is for us */
724 slave = mlx4_master_func_num(dev->dev);
726 /* See if the slave id is encoded in a response mad */
727 if (mad->mad_hdr.method & 0x80) {
728 slave_id = (u8 *) &mad->mad_hdr.tid;
730 if (slave != 255) /*255 indicates the dom0*/
731 *slave_id = 0; /* remap tid */
734 /* If a grh is present, we demux according to it */
735 if (wc->wc_flags & IB_WC_GRH) {
736 if (grh->dgid.global.interface_id ==
737 cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
738 grh->dgid.global.subnet_prefix == cpu_to_be64(
739 atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
742 slave = mlx4_ib_find_real_gid(ibdev, port,
743 grh->dgid.global.interface_id);
745 mlx4_ib_warn(ibdev, "failed matching grh\n");
750 /* Class-specific handling */
751 switch (mad->mad_hdr.mgmt_class) {
752 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
753 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
754 /* 255 indicates the dom0 */
755 if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
756 if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
758 /* For a VF, drop unsolicited MADs. */
759 if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
760 mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
761 slave, mad->mad_hdr.mgmt_class,
762 mad->mad_hdr.method);
767 case IB_MGMT_CLASS_SUBN_ADM:
768 if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
769 (struct ib_sa_mad *) mad))
772 case IB_MGMT_CLASS_CM:
773 if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
776 case IB_MGMT_CLASS_DEVICE_MGMT:
777 if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
781 /* Drop unsupported classes for slaves in tunnel mode */
782 if (slave != mlx4_master_func_num(dev->dev)) {
783 pr_debug("dropping unsupported ingress mad from class:%d "
784 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
788 /* Make sure a slave value of 255 (dom0 marker) that was not remapped above does not slip through. */
789 if (slave >= dev->dev->caps.sqp_demux) {
790 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
791 slave, dev->dev->caps.sqp_demux);
795 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
797 pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
802 static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
803 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
804 const struct ib_mad *in_mad, struct ib_mad *out_mad)
806 u16 slid, prev_lid = 0;
808 struct ib_port_attr pattr;
810 if (in_wc && in_wc->qp->qp_num) {
811 pr_debug("received MAD: slid:%d sqpn:%d "
812 "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
813 in_wc->slid, in_wc->src_qp,
814 in_wc->dlid_path_bits,
817 in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
818 be16_to_cpu(in_mad->mad_hdr.attr_id));
819 if (in_wc->wc_flags & IB_WC_GRH) {
820 pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
821 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
822 be64_to_cpu(in_grh->sgid.global.interface_id));
823 pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
824 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
825 be64_to_cpu(in_grh->dgid.global.interface_id));
829 slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);
831 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
832 forward_trap(to_mdev(ibdev), port_num, in_mad);
833 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
836 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
837 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
838 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
839 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
840 in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
841 return IB_MAD_RESULT_SUCCESS;
844 * Don't process SMInfo queries -- the SMA can't handle them.
846 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
847 return IB_MAD_RESULT_SUCCESS;
848 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
849 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
850 in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
851 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
852 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
853 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
854 return IB_MAD_RESULT_SUCCESS;
856 return IB_MAD_RESULT_SUCCESS;
858 if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
859 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
860 in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
861 in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
862 !ib_query_port(ibdev, port_num, &pattr))
863 prev_lid = ib_lid_cpu16(pattr.lid);
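/*
 * prev_lid is captured here, before the PortInfo SET is executed by the
 * firmware, so that smp_snoop() below can compare it with the newly set LID
 * and synthesize a LID-change event if needed.
 */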
865 err = mlx4_MAD_IFC(to_mdev(ibdev),
866 (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
867 (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
868 MLX4_MAD_IFC_NET_VIEW,
869 port_num, in_wc, in_grh, in_mad, out_mad);
871 return IB_MAD_RESULT_FAILURE;
873 if (!out_mad->mad_hdr.status) {
874 smp_snoop(ibdev, port_num, in_mad, prev_lid);
875 /* slaves get node desc from FW */
876 if (!mlx4_is_slave(to_mdev(ibdev)->dev))
877 node_desc_override(ibdev, out_mad);
880 /* set return bit in status of directed route responses */
881 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
882 out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
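/*
 * Bit 15 of the directed-route SMP status field is the direction bit
 * (cf. IB_SMP_DIRECTION in <rdma/ib_smi.h>); setting it here marks the MAD
 * as a returning (response) directed-route SMP.
 */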
884 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
885 /* no response for trap repress */
886 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
888 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
891 static void edit_counter(struct mlx4_counter *cnt, void *counters,
895 case IB_PMA_PORT_COUNTERS:
897 struct ib_pma_portcounters *pma_cnt =
898 (struct ib_pma_portcounters *)counters;
900 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
901 (be64_to_cpu(cnt->tx_bytes) >> 2));
902 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
903 (be64_to_cpu(cnt->rx_bytes) >> 2));
904 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
905 be64_to_cpu(cnt->tx_frames));
906 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
907 be64_to_cpu(cnt->rx_frames));
910 case IB_PMA_PORT_COUNTERS_EXT:
912 struct ib_pma_portcounters_ext *pma_cnt_ext =
913 (struct ib_pma_portcounters_ext *)counters;
915 pma_cnt_ext->port_xmit_data =
916 cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
917 pma_cnt_ext->port_rcv_data =
918 cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
919 pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
920 pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
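/*
 * In both cases above the byte counters are shifted right by two because
 * the PMA PortCounters/PortCountersExt data fields are expressed in units
 * of 32-bit words (octets / 4), while the frame counters map one-to-one.
 */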
926 static int iboe_process_mad_port_info(void *out_mad)
928 struct ib_class_port_info cpi = {};
930 cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
931 memcpy(out_mad, &cpi, sizeof(cpi));
932 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
935 static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
936 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
937 const struct ib_mad *in_mad, struct ib_mad *out_mad)
939 struct mlx4_counter counter_stats;
940 struct mlx4_ib_dev *dev = to_mdev(ibdev);
941 struct counter_index *tmp_counter;
942 int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;
944 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
947 if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
948 return iboe_process_mad_port_info((void *)(out_mad->data + 40));
950 memset(&counter_stats, 0, sizeof(counter_stats));
951 mutex_lock(&dev->counters_table[port_num - 1].mutex);
952 list_for_each_entry(tmp_counter,
953 &dev->counters_table[port_num - 1].counters_list,
955 err = mlx4_get_counter_stats(dev->dev,
959 err = IB_MAD_RESULT_FAILURE;
965 mutex_unlock(&dev->counters_table[port_num - 1].mutex);
967 memset(out_mad->data, 0, sizeof out_mad->data);
968 switch (counter_stats.counter_mode & 0xf) {
970 edit_counter(&counter_stats,
971 (void *)(out_mad->data + 40),
972 in_mad->mad_hdr.attr_id);
973 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
976 err = IB_MAD_RESULT_FAILURE;
983 int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
984 const struct ib_wc *in_wc, const struct ib_grh *in_grh,
985 const struct ib_mad_hdr *in, size_t in_mad_size,
986 struct ib_mad_hdr *out, size_t *out_mad_size,
987 u16 *out_mad_pkey_index)
989 struct mlx4_ib_dev *dev = to_mdev(ibdev);
990 const struct ib_mad *in_mad = (const struct ib_mad *)in;
991 struct ib_mad *out_mad = (struct ib_mad *)out;
992 enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
994 if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
995 *out_mad_size != sizeof(*out_mad)))
996 return IB_MAD_RESULT_FAILURE;
998 /* iboe_process_mad(), which uses the HCA flow counters to implement IB PMA
999 * queries, should be called only by VFs and only for that specific purpose.
1001 if (link == IB_LINK_LAYER_INFINIBAND) {
1002 if (mlx4_is_slave(dev->dev) &&
1003 (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
1004 (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
1005 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
1006 in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
1007 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
1008 in_grh, in_mad, out_mad);
1010 return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
1011 in_grh, in_mad, out_mad);
1014 if (link == IB_LINK_LAYER_ETHERNET)
1015 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
1016 in_grh, in_mad, out_mad);
1021 static void send_handler(struct ib_mad_agent *agent,
1022 struct ib_mad_send_wc *mad_send_wc)
1024 if (mad_send_wc->send_buf->context[0])
1025 rdma_destroy_ah(mad_send_wc->send_buf->context[0]);
1026 ib_free_send_mad(mad_send_wc->send_buf);
1029 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
1031 struct ib_mad_agent *agent;
1034 enum rdma_link_layer ll;
1036 for (p = 0; p < dev->num_ports; ++p) {
1037 ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
1038 for (q = 0; q <= 1; ++q) {
1039 if (ll == IB_LINK_LAYER_INFINIBAND) {
1040 agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
1041 q ? IB_QPT_GSI : IB_QPT_SMI,
1042 NULL, 0, send_handler,
1044 if (IS_ERR(agent)) {
1045 ret = PTR_ERR(agent);
1048 dev->send_agent[p][q] = agent;
1050 dev->send_agent[p][q] = NULL;
1057 for (p = 0; p < dev->num_ports; ++p)
1058 for (q = 0; q <= 1; ++q)
1059 if (dev->send_agent[p][q])
1060 ib_unregister_mad_agent(dev->send_agent[p][q]);
1065 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
1067 struct ib_mad_agent *agent;
1070 for (p = 0; p < dev->num_ports; ++p) {
1071 for (q = 0; q <= 1; ++q) {
1072 agent = dev->send_agent[p][q];
1074 dev->send_agent[p][q] = NULL;
1075 ib_unregister_mad_agent(agent);
1080 rdma_destroy_ah(dev->sm_ah[p]);
1084 static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
1086 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
1088 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1089 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
1090 MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
1093 static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
1095 /* re-configure the alias-guid and mcg's */
1096 if (mlx4_is_master(dev->dev)) {
1097 mlx4_ib_invalidate_all_guid_record(dev, port_num);
1099 if (!dev->sriov.is_going_down) {
1100 mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
1101 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
1102 MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
1106 /* Update the sl to vl table from inside client rereg
1107 * only if in secure-host mode (snooping is not possible)
1108 * and the sl-to-vl change event is not generated by FW.
1110 if (!mlx4_is_slave(dev->dev) &&
1111 dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
1112 !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
1113 if (mlx4_is_master(dev->dev))
1114 /* already in work queue from mlx4_ib_event queueing
1115 * mlx4_handle_port_mgmt_change_event, which calls
1116 * this procedure. Therefore, call sl2vl_update directly.
1118 mlx4_ib_sl2vl_update(dev, port_num);
1120 mlx4_sched_ib_sl2vl_update_work(dev, port_num);
1122 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
1125 static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
1126 struct mlx4_eqe *eqe)
1128 __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
1129 GET_MASK_FROM_EQE(eqe));
1132 static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
1133 u32 guid_tbl_blk_num, u32 change_bitmap)
1135 struct ib_smp *in_mad = NULL;
1136 struct ib_smp *out_mad = NULL;
1139 if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
1142 in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
1143 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1144 if (!in_mad || !out_mad)
1147 guid_tbl_blk_num *= 4;
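/*
 * The EQE apparently reports GUID changes in blocks of 32 entries, while a
 * GUIDInfo SMP attribute holds only GUID_TBL_BLK_NUM_ENTRIES (8) GUIDs, so
 * one EQE block is fetched below as 4 consecutive GUIDInfo records (hence
 * the multiply by 4 and the loop over i < 4, with 8 bits of the change
 * bitmap examined per record).
 */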
1149 for (i = 0; i < 4; i++) {
1150 if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
1152 memset(in_mad, 0, sizeof *in_mad);
1153 memset(out_mad, 0, sizeof *out_mad);
1155 in_mad->base_version = 1;
1156 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
1157 in_mad->class_version = 1;
1158 in_mad->method = IB_MGMT_METHOD_GET;
1159 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
1160 in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);
1162 if (mlx4_MAD_IFC(dev,
1163 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
1164 port_num, NULL, NULL, in_mad, out_mad)) {
1165 mlx4_ib_warn(&dev->ib_dev, "Failed to get GUID INFO via MAD_IFC\n");
1169 mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
1171 (u8 *)(&((struct ib_smp *)out_mad)->data));
1172 mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
1174 (u8 *)(&((struct ib_smp *)out_mad)->data));
1183 void handle_port_mgmt_change_event(struct work_struct *work)
1185 struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
1186 struct mlx4_ib_dev *dev = ew->ib_dev;
1187 struct mlx4_eqe *eqe = &(ew->ib_eqe);
1188 u8 port = eqe->event.port_mgmt_change.port;
1193 switch (eqe->subtype) {
1194 case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
1195 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
1197 /* Update the SM ah - This should be done before handling
1198 * the other changed attributes so that MADs can be sent to the SM */
1199 if (changed_attr & MSTR_SM_CHANGE_MASK) {
1200 u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
1201 u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
1202 update_sm_ah(dev, port, lid, sl);
1205 /* Check if it is a lid change event */
1206 if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
1207 handle_lid_change_event(dev, port);
1209 /* Generate GUID changed event */
1210 if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1211 if (mlx4_is_master(dev->dev)) {
1215 if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
1216 err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
1218 gid.global.subnet_prefix =
1219 eqe->event.port_mgmt_change.params.port_info.gid_prefix;
1221 pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
1224 pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
1226 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
1227 be64_to_cpu(gid.global.subnet_prefix));
1228 atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
1229 be64_to_cpu(gid.global.subnet_prefix));
1232 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1233 /*if master, notify all slaves*/
1234 if (mlx4_is_master(dev->dev))
1235 mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1236 MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
1239 if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
1240 handle_client_rereg_event(dev, port);
1243 case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
1244 mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
1245 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1246 propagate_pkey_ev(dev, port, eqe);
1248 case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
1249 /* paravirtualized master's guid is guid 0 -- does not change */
1250 if (!mlx4_is_master(dev->dev))
1251 mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1252 /*if master, notify relevant slaves*/
1253 else if (!dev->sriov.is_going_down) {
1254 tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
1255 change_bitmap = GET_MASK_FROM_EQE(eqe);
1256 handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1260 case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
1261 /* cache sl to vl mapping changes for use in
1262 * filling QP1 LRH VL field when sending packets
1264 if (!mlx4_is_slave(dev->dev)) {
1265 union sl2vl_tbl_to_u64 sl2vl64;
1268 for (jj = 0; jj < 8; jj++) {
1270 eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
1271 pr_debug("port %u, sl2vl[%d] = %02x\n",
1272 port, jj, sl2vl64.sl8[jj]);
1274 atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
1278 pr_warn("Unsupported subtype 0x%x for "
1279 "Port Management Change event\n", eqe->subtype);
1285 void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
1286 enum ib_event_type type)
1288 struct ib_event event;
1290 event.device = &dev->ib_dev;
1291 event.element.port_num = port_num;
1294 ib_dispatch_event(&event);
1297 static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1299 unsigned long flags;
1300 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1301 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1302 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1303 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1304 queue_work(ctx->wq, &ctx->work);
1305 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1308 static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
1310 unsigned long flags;
1311 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1312 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1314 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1315 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1316 queue_work(ctx->wi_wq, &ctx->work);
1317 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1320 static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1321 struct mlx4_ib_demux_pv_qp *tun_qp,
1324 struct ib_sge sg_list;
1325 struct ib_recv_wr recv_wr, *bad_recv_wr;
1328 size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1329 sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1331 sg_list.addr = tun_qp->ring[index].map;
1332 sg_list.length = size;
1333 sg_list.lkey = ctx->pd->local_dma_lkey;
1335 recv_wr.next = NULL;
1336 recv_wr.sg_list = &sg_list;
1337 recv_wr.num_sge = 1;
1338 recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1339 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1340 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1341 size, DMA_FROM_DEVICE);
1342 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1345 static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1346 int slave, struct ib_sa_mad *sa_mad)
1350 /* dispatch to different sa handlers */
1351 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1352 case IB_SA_ATTR_MC_MEMBER_REC:
1353 ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1361 static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1363 int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1365 return (qpn >= proxy_start && qpn <= proxy_start + 1);
1369 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1370 enum ib_qp_type dest_qpt, u16 pkey_index,
1371 u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
1372 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
1376 struct ib_send_wr *bad_wr;
1377 struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1378 struct mlx4_ib_demux_pv_qp *sqp;
1379 struct mlx4_mad_snd_buf *sqp_mad;
1381 struct ib_qp *send_qp = NULL;
1382 struct ib_global_route *grh;
1383 unsigned wire_tx_ix = 0;
1390 sqp_ctx = dev->sriov.sqps[port-1];
1392 /* check if proxy qp created */
1393 if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1396 if (dest_qpt == IB_QPT_SMI) {
1398 sqp = &sqp_ctx->qp[0];
1399 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1402 sqp = &sqp_ctx->qp[1];
1403 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1409 grh = rdma_ah_retrieve_grh(attr);
1410 sgid_index = grh->sgid_index;
1411 grh->sgid_index = 0;
1412 ah = rdma_create_ah(sqp_ctx->pd, attr);
1415 grh->sgid_index = sgid_index;
1416 to_mah(ah)->av.ib.gid_index = sgid_index;
1417 /* get rid of force-loopback bit */
1418 to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
1419 spin_lock(&sqp->tx_lock);
1420 if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1421 (MLX4_NUM_TUNNEL_BUFS - 1))
1424 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1425 spin_unlock(&sqp->tx_lock);
1429 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1430 if (sqp->tx_ring[wire_tx_ix].ah)
1431 rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1432 sqp->tx_ring[wire_tx_ix].ah = ah;
1433 ib_dma_sync_single_for_cpu(&dev->ib_dev,
1434 sqp->tx_ring[wire_tx_ix].buf.map,
1435 sizeof (struct mlx4_mad_snd_buf),
1438 memcpy(&sqp_mad->payload, mad, sizeof *mad);
1440 ib_dma_sync_single_for_device(&dev->ib_dev,
1441 sqp->tx_ring[wire_tx_ix].buf.map,
1442 sizeof (struct mlx4_mad_snd_buf),
1445 list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1446 list.length = sizeof (struct mlx4_mad_snd_buf);
1447 list.lkey = sqp_ctx->pd->local_dma_lkey;
1451 wr.pkey_index = wire_pkey_ix;
1452 wr.remote_qkey = qkey;
1453 wr.remote_qpn = remote_qpn;
1455 wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1456 wr.wr.sg_list = &list;
1458 wr.wr.opcode = IB_WR_SEND;
1459 wr.wr.send_flags = IB_SEND_SIGNALED;
1461 memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
1462 if (vlan_id < 0x1000)
1463 vlan_id |= (rdma_ah_get_sl(attr) & 7) << 13;
1464 to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
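/*
 * Note: the resulting 16-bit value follows the 802.1Q TCI layout -- the
 * VLAN id occupies bits [11:0] and the SL is placed in the priority (PCP)
 * bits [15:13] -- before being stored big-endian in the address vector.
 */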
1467 ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
1471 spin_lock(&sqp->tx_lock);
1473 spin_unlock(&sqp->tx_lock);
1474 sqp->tx_ring[wire_tx_ix].ah = NULL;
1476 rdma_destroy_ah(ah);
1480 static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1482 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1484 return mlx4_get_base_gid_ix(dev->dev, slave, port);
1487 static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
1488 struct rdma_ah_attr *ah_attr)
1490 struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
1491 if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1492 grh->sgid_index = slave;
1494 grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
1497 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1499 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1500 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1501 int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1502 struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1503 struct mlx4_ib_ah ah;
1504 struct rdma_ah_attr ah_attr;
1512 /* Get slave that sent this packet */
1513 if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1514 wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1515 (wc->src_qp & 0x1) != ctx->port - 1 ||
1517 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1520 slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
1521 if (slave != ctx->slave) {
1522 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1523 "belongs to another slave\n", wc->src_qp);
1527 /* Map transaction ID */
1528 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1529 sizeof (struct mlx4_tunnel_mad),
1531 switch (tunnel->mad.mad_hdr.method) {
1532 case IB_MGMT_METHOD_SET:
1533 case IB_MGMT_METHOD_GET:
1534 case IB_MGMT_METHOD_REPORT:
1535 case IB_SA_METHOD_GET_TABLE:
1536 case IB_SA_METHOD_DELETE:
1537 case IB_SA_METHOD_GET_MULTI:
1538 case IB_SA_METHOD_GET_TRACE_TBL:
1539 slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1541 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
1542 "class:%d slave:%d\n", *slave_id,
1543 tunnel->mad.mad_hdr.mgmt_class, slave);
1551 /* Class-specific handling */
1552 switch (tunnel->mad.mad_hdr.mgmt_class) {
1553 case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1554 case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1555 if (slave != mlx4_master_func_num(dev->dev) &&
1556 !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
1559 case IB_MGMT_CLASS_SUBN_ADM:
1560 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1561 (struct ib_sa_mad *) &tunnel->mad))
1564 case IB_MGMT_CLASS_CM:
1565 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1566 (struct ib_mad *) &tunnel->mad))
1569 case IB_MGMT_CLASS_DEVICE_MGMT:
1570 if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1571 tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1575 /* Drop unsupported classes for slaves in tunnel mode */
1576 if (slave != mlx4_master_func_num(dev->dev)) {
1577 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
1578 "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
1583 /* We are using standard ib_core services to send the mad, so generate a
1584 * standard address handle by decoding the tunnelled mlx4_ah fields */
1585 memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1586 ah.ibah.device = ctx->ib_dev;
1588 port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
1589 port = mlx4_slave_convert_port(dev->dev, slave, port);
1592 ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
1593 ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
1595 mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1596 if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
1597 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
1598 dmac = rdma_ah_retrieve_dmac(&ah_attr);
1600 memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
1601 vlan_id = be16_to_cpu(tunnel->hdr.vlan);
1602 /* if the slave has a default vlan, use it */
1603 if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
1605 rdma_ah_set_sl(&ah_attr, qos);
1607 mlx4_ib_send_to_wire(dev, slave, ctx->port,
1608 is_proxy_qp0(dev, wc->src_qp, slave) ?
1609 IB_QPT_SMI : IB_QPT_GSI,
1610 be16_to_cpu(tunnel->hdr.pkey_index),
1611 be32_to_cpu(tunnel->hdr.remote_qpn),
1612 be32_to_cpu(tunnel->hdr.qkey),
1613 &ah_attr, wc->smac, vlan_id, &tunnel->mad);
1616 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1617 enum ib_qp_type qp_type, int is_tun)
1620 struct mlx4_ib_demux_pv_qp *tun_qp;
1621 int rx_buf_size, tx_buf_size;
1623 if (qp_type > IB_QPT_GSI)
1626 tun_qp = &ctx->qp[qp_type];
1628 tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
1633 tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1634 sizeof (struct mlx4_ib_tun_tx_buf),
1636 if (!tun_qp->tx_ring) {
1637 kfree(tun_qp->ring);
1638 tun_qp->ring = NULL;
1643 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1644 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1646 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1647 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1650 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1651 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1652 if (!tun_qp->ring[i].addr)
1654 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1655 tun_qp->ring[i].addr,
1658 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
1659 kfree(tun_qp->ring[i].addr);
1664 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1665 tun_qp->tx_ring[i].buf.addr =
1666 kmalloc(tx_buf_size, GFP_KERNEL);
1667 if (!tun_qp->tx_ring[i].buf.addr)
1669 tun_qp->tx_ring[i].buf.map =
1670 ib_dma_map_single(ctx->ib_dev,
1671 tun_qp->tx_ring[i].buf.addr,
1674 if (ib_dma_mapping_error(ctx->ib_dev,
1675 tun_qp->tx_ring[i].buf.map)) {
1676 kfree(tun_qp->tx_ring[i].buf.addr);
1679 tun_qp->tx_ring[i].ah = NULL;
1681 spin_lock_init(&tun_qp->tx_lock);
1682 tun_qp->tx_ix_head = 0;
1683 tun_qp->tx_ix_tail = 0;
1684 tun_qp->proxy_qpt = qp_type;
1691 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1692 tx_buf_size, DMA_TO_DEVICE);
1693 kfree(tun_qp->tx_ring[i].buf.addr);
1695 i = MLX4_NUM_TUNNEL_BUFS;
1699 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1700 rx_buf_size, DMA_FROM_DEVICE);
1701 kfree(tun_qp->ring[i].addr);
1703 kfree(tun_qp->tx_ring);
1704 tun_qp->tx_ring = NULL;
1705 kfree(tun_qp->ring);
1706 tun_qp->ring = NULL;
1710 static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1711 enum ib_qp_type qp_type, int is_tun)
1714 struct mlx4_ib_demux_pv_qp *tun_qp;
1715 int rx_buf_size, tx_buf_size;
1717 if (qp_type > IB_QPT_GSI)
1720 tun_qp = &ctx->qp[qp_type];
1722 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1723 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1725 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1726 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1730 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1731 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1732 rx_buf_size, DMA_FROM_DEVICE);
1733 kfree(tun_qp->ring[i].addr);
1736 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1737 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1738 tx_buf_size, DMA_TO_DEVICE);
1739 kfree(tun_qp->tx_ring[i].buf.addr);
1740 if (tun_qp->tx_ring[i].ah)
1741 rdma_destroy_ah(tun_qp->tx_ring[i].ah);
1743 kfree(tun_qp->tx_ring);
1744 kfree(tun_qp->ring);
1747 static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1749 struct mlx4_ib_demux_pv_ctx *ctx;
1750 struct mlx4_ib_demux_pv_qp *tun_qp;
1753 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1754 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1756 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1757 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1758 if (wc.status == IB_WC_SUCCESS) {
1759 switch (wc.opcode) {
1761 mlx4_ib_multiplex_mad(ctx, &wc);
1762 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1764 (MLX4_NUM_TUNNEL_BUFS - 1));
1766 pr_err("Failed reposting tunnel "
1767 "buf:%lld\n", wc.wr_id);
1770 pr_debug("received tunnel send completion: "
1771 "wrid=0x%llx, status=0x%x\n",
1772 wc.wr_id, wc.status);
1773 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1774 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1775 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1777 spin_lock(&tun_qp->tx_lock);
1778 tun_qp->tx_ix_tail++;
1779 spin_unlock(&tun_qp->tx_lock);
1786 pr_debug("mlx4_ib: completion error in tunnel: %d."
1787 " status = %d, wrid = 0x%llx\n",
1788 ctx->slave, wc.status, wc.wr_id);
1789 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1790 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1791 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1792 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1794 spin_lock(&tun_qp->tx_lock);
1795 tun_qp->tx_ix_tail++;
1796 spin_unlock(&tun_qp->tx_lock);
1802 static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1804 struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1806 /* It's worse than that! He's dead, Jim! */
1807 pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1808 event->event, sqp->port);
1811 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1812 enum ib_qp_type qp_type, int create_tun)
1815 struct mlx4_ib_demux_pv_qp *tun_qp;
1816 struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1817 struct ib_qp_attr attr;
1818 int qp_attr_mask_INIT;
1820 if (qp_type > IB_QPT_GSI)
1823 tun_qp = &ctx->qp[qp_type];
1825 memset(&qp_init_attr, 0, sizeof qp_init_attr);
1826 qp_init_attr.init_attr.send_cq = ctx->cq;
1827 qp_init_attr.init_attr.recv_cq = ctx->cq;
1828 qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1829 qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1830 qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1831 qp_init_attr.init_attr.cap.max_send_sge = 1;
1832 qp_init_attr.init_attr.cap.max_recv_sge = 1;
1834 qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1835 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
1836 qp_init_attr.port = ctx->port;
1837 qp_init_attr.slave = ctx->slave;
1838 qp_init_attr.proxy_qp_type = qp_type;
1839 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1840 IB_QP_QKEY | IB_QP_PORT;
1842 qp_init_attr.init_attr.qp_type = qp_type;
1843 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
1844 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1846 qp_init_attr.init_attr.port_num = ctx->port;
1847 qp_init_attr.init_attr.qp_context = ctx;
1848 qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1849 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1850 if (IS_ERR(tun_qp->qp)) {
1851 ret = PTR_ERR(tun_qp->qp);
1853 pr_err("Couldn't create %s QP (%d)\n",
1854 create_tun ? "tunnel" : "special", ret);
1858 memset(&attr, 0, sizeof attr);
1859 attr.qp_state = IB_QPS_INIT;
1862 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
1863 ctx->port, IB_DEFAULT_PKEY_FULL,
1865 if (ret || !create_tun)
1867 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1868 attr.qkey = IB_QP1_QKEY;
1869 attr.port_num = ctx->port;
1870 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1872 pr_err("Couldn't change %s qp state to INIT (%d)\n",
1873 create_tun ? "tunnel" : "special", ret);
1876 attr.qp_state = IB_QPS_RTR;
1877 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1879 pr_err("Couldn't change %s qp state to RTR (%d)\n",
1880 create_tun ? "tunnel" : "special", ret);
1883 attr.qp_state = IB_QPS_RTS;
1885 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1887 pr_err("Couldn't change %s qp state to RTS (%d)\n",
1888 create_tun ? "tunnel" : "special", ret);
1892 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1893 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1895 pr_err("mlx4_ib_post_pv_qp_buf error"
1896 " (err = %d, i = %d)\n", ret, i);
1903 ib_destroy_qp(tun_qp->qp);
1909 * IB MAD completion callback for real SQPs
1911 static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1913 struct mlx4_ib_demux_pv_ctx *ctx;
1914 struct mlx4_ib_demux_pv_qp *sqp;
1919 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1920 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1922 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1923 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1924 if (wc.status == IB_WC_SUCCESS) {
1925 switch (wc.opcode) {
1927 rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
1928 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1929 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1931 spin_lock(&sqp->tx_lock);
1933 spin_unlock(&sqp->tx_lock);
1936 mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1937 (sqp->ring[wc.wr_id &
1938 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1939 grh = &(((struct mlx4_mad_rcv_buf *)
1940 (sqp->ring[wc.wr_id &
1941 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1942 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1943 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1944 (MLX4_NUM_TUNNEL_BUFS - 1)))
1945 pr_err("Failed reposting SQP "
1946 "buf:%lld\n", wc.wr_id);
1952 pr_debug("mlx4_ib: completion error on SQP: %d."
1953 " status = %d, wrid = 0x%llx\n",
1954 ctx->slave, wc.status, wc.wr_id);
1955 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1956 rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
1957 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1958 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1960 spin_lock(&sqp->tx_lock);
1962 spin_unlock(&sqp->tx_lock);
1968 static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1969 struct mlx4_ib_demux_pv_ctx **ret_ctx)
1971 struct mlx4_ib_demux_pv_ctx *ctx;
1974 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1978 ctx->ib_dev = &dev->ib_dev;
1985 static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1987 if (dev->sriov.demux[port - 1].tun[slave]) {
1988 kfree(dev->sriov.demux[port - 1].tun[slave]);
1989 dev->sriov.demux[port - 1].tun[slave] = NULL;
1993 static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1994 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1997 struct ib_cq_init_attr cq_attr = {};
1999 if (ctx->state != DEMUX_PV_STATE_DOWN)
2002 ctx->state = DEMUX_PV_STATE_STARTING;
2003 /* have QP0 only if link layer is IB */
2004 if (rdma_port_get_link_layer(ibdev, ctx->port) ==
2005 IB_LINK_LAYER_INFINIBAND)
2009 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
2011 pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
2016 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
2018 pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
2022 cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
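/*
 * The CQ below is shared by the QP0 and QP1 proxy/tunnel QPs of this
 * context (both use ctx->cq as their send and receive CQ in
 * create_pv_sqp()), which is why it is sized as a multiple of
 * MLX4_NUM_TUNNEL_BUFS rather than for a single ring.
 */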
2026 cq_attr.cqe = cq_size;
2027 ctx->cq = ib_create_cq(ctx->ib_dev,
2028 create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
2029 NULL, ctx, &cq_attr);
2030 if (IS_ERR(ctx->cq)) {
2031 ret = PTR_ERR(ctx->cq);
2032 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
2036 ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
2037 if (IS_ERR(ctx->pd)) {
2038 ret = PTR_ERR(ctx->pd);
2039 pr_err("Couldn't create tunnel PD (%d)\n", ret);
2044 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
2046 pr_err("Couldn't create %s QP0 (%d)\n",
2047 create_tun ? "tunnel for" : "", ret);
2052 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
2054 pr_err("Couldn't create %s QP1 (%d)\n",
2055 create_tun ? "tunnel for" : "", ret);
2060 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
2062 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
2064 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
2065 ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;
2067 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
2069 pr_err("Couldn't arm tunnel cq (%d)\n", ret);
2072 ctx->state = DEMUX_PV_STATE_ACTIVE;
2077 ib_destroy_qp(ctx->qp[1].qp);
2078 ctx->qp[1].qp = NULL;
2083 ib_destroy_qp(ctx->qp[0].qp);
2084 ctx->qp[0].qp = NULL;
2087 ib_dealloc_pd(ctx->pd);
2091 ib_destroy_cq(ctx->cq);
2095 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
2099 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
2101 ctx->state = DEMUX_PV_STATE_DOWN;
2105 static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
2106 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
2110 if (ctx->state > DEMUX_PV_STATE_DOWN) {
2111 ctx->state = DEMUX_PV_STATE_DOWNING;
2113 flush_workqueue(ctx->wq);
2115 ib_destroy_qp(ctx->qp[0].qp);
2116 ctx->qp[0].qp = NULL;
2117 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
2119 ib_destroy_qp(ctx->qp[1].qp);
2120 ctx->qp[1].qp = NULL;
2121 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
2122 ib_dealloc_pd(ctx->pd);
2124 ib_destroy_cq(ctx->cq);
2126 ctx->state = DEMUX_PV_STATE_DOWN;
2130 static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
2131 int port, int do_init)
2136 clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
2137 /* for master, destroy real sqp resources */
2138 if (slave == mlx4_master_func_num(dev->dev))
2139 destroy_pv_resources(dev, slave, port,
2140 dev->sriov.sqps[port - 1], 1);
2141 /* destroy the tunnel qp resources */
2142 destroy_pv_resources(dev, slave, port,
2143 dev->sriov.demux[port - 1].tun[slave], 1);
2147 /* create the tunnel qp resources */
2148 ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
2149 dev->sriov.demux[port - 1].tun[slave]);
2151 /* for master, create the real sqp resources */
2152 if (!ret && slave == mlx4_master_func_num(dev->dev))
2153 ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
2154 dev->sriov.sqps[port - 1]);
2158 void mlx4_ib_tunnels_update_work(struct work_struct *work)
2160 struct mlx4_ib_demux_work *dmxw;
2162 dmxw = container_of(work, struct mlx4_ib_demux_work, work);
2163 mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
2169 static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
2170 struct mlx4_ib_demux_ctx *ctx,
2177 ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
2178 sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
2184 ctx->ib_dev = &dev->ib_dev;
2187 i < min(dev->dev->caps.sqp_demux,
2188 (u16)(dev->dev->persist->num_vfs + 1));
2190 struct mlx4_active_ports actv_ports =
2191 mlx4_get_active_ports(dev->dev, i);
2193 if (!test_bit(port - 1, actv_ports.ports))
2196 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
2203 ret = mlx4_ib_mcg_port_init(ctx);
2205 pr_err("Failed initializing mcg para-virt (%d)\n", ret);
2209 snprintf(name, sizeof(name), "mlx4_ibt%d", port);
2210 ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2212 pr_err("Failed to create tunnelling WQ for port %d\n", port);
2217 snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
2218 ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2220 pr_err("Failed to create wire WQ for port %d\n", port);
2225 snprintf(name, sizeof(name), "mlx4_ibud%d", port);
2226 ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2228 pr_err("Failed to create up/down WQ for port %d\n", port);
2236 destroy_workqueue(ctx->wi_wq);
2240 destroy_workqueue(ctx->wq);
2244 mlx4_ib_mcg_port_cleanup(ctx, 1);
2246 for (i = 0; i < dev->dev->caps.sqp_demux; i++)
2247 free_pv_object(dev, i, port);
2253 static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
2255 if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
2256 sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
2257 flush_workqueue(sqp_ctx->wq);
2258 if (sqp_ctx->has_smi) {
2259 ib_destroy_qp(sqp_ctx->qp[0].qp);
2260 sqp_ctx->qp[0].qp = NULL;
2261 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
2263 ib_destroy_qp(sqp_ctx->qp[1].qp);
2264 sqp_ctx->qp[1].qp = NULL;
2265 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
2266 ib_dealloc_pd(sqp_ctx->pd);
2268 ib_destroy_cq(sqp_ctx->cq);
2270 sqp_ctx->state = DEMUX_PV_STATE_DOWN;
2274 static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
2278 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
2279 mlx4_ib_mcg_port_cleanup(ctx, 1);
2280 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2283 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
2284 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
2286 flush_workqueue(ctx->wq);
2287 flush_workqueue(ctx->wi_wq);
2288 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2289 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
2290 free_pv_object(dev, i, ctx->port);
2293 destroy_workqueue(ctx->ud_wq);
2294 destroy_workqueue(ctx->wi_wq);
2295 destroy_workqueue(ctx->wq);
2299 static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
2303 if (!mlx4_is_master(dev->dev))
2305 /* initialize or tear down tunnel QPs for the master */
2306 for (i = 0; i < dev->dev->caps.num_ports; i++)
2307 mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
2311 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2316 if (!mlx4_is_mfunc(dev->dev))
2319 dev->sriov.is_going_down = 0;
2320 spin_lock_init(&dev->sriov.going_down_lock);
2321 mlx4_ib_cm_paravirt_init(dev);
2323 mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
2325 if (mlx4_is_slave(dev->dev)) {
2326 mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
2330 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2331 if (i == mlx4_master_func_num(dev->dev))
2332 mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
2334 mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
2337 err = mlx4_ib_init_alias_guid_service(dev);
2339 mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
2342 err = mlx4_ib_device_register_sysfs(dev);
2344 mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
2348 mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
2349 dev->dev->caps.sqp_demux);
2350 for (i = 0; i < dev->num_ports; i++) {
2352 err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
2355 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2356 atomic64_set(&dev->sriov.demux[i].subnet_prefix,
2357 be64_to_cpu(gid.global.subnet_prefix));
2358 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2359 &dev->sriov.sqps[i]);
2362 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2366 mlx4_ib_master_tunnels(dev, 1);
2370 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2373 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2374 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2376 mlx4_ib_device_unregister_sysfs(dev);
2379 mlx4_ib_destroy_alias_guid_service(dev);
2382 mlx4_ib_cm_paravirt_clean(dev, -1);
2387 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2390 unsigned long flags;
2392 if (!mlx4_is_mfunc(dev->dev))
2395 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2396 dev->sriov.is_going_down = 1;
2397 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
2398 if (mlx4_is_master(dev->dev)) {
2399 for (i = 0; i < dev->num_ports; i++) {
2400 flush_workqueue(dev->sriov.demux[i].ud_wq);
2401 mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2402 kfree(dev->sriov.sqps[i]);
2403 dev->sriov.sqps[i] = NULL;
2404 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2407 mlx4_ib_cm_paravirt_clean(dev, -1);
2408 mlx4_ib_destroy_alias_guid_service(dev);
2409 mlx4_ib_device_unregister_sysfs(dev);