2 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 * Copyright (c) 2014 Intel Corporation. All rights reserved.
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
38 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40 #include <linux/dma-mapping.h>
41 #include <linux/idr.h>
42 #include <linux/slab.h>
43 #include <linux/module.h>
44 #include <linux/security.h>
45 #include <rdma/ib_cache.h>
48 #include "core_priv.h"
54 static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
55 static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
57 module_param_named(send_queue_size, mad_sendq_size, int, 0444);
58 MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
59 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
60 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
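/*
* Both parameters are read-only after module load (permission 0444):
* recv_queue_size bounds how many receive work requests are kept posted on
* each MAD QP, and send_queue_size bounds how many sends may be outstanding
* before further requests are parked on the overflow list.
*/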
63 * The mlx4 driver uses the top byte to distinguish which virtual function
64 * generated the MAD, so we must avoid using it.
66 #define AGENT_ID_LIMIT (1 << 24)
67 static DEFINE_IDR(ib_mad_clients);
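/*
* Each registered agent gets a cyclically allocated ID below AGENT_ID_LIMIT
* from ib_mad_clients; the ID is stored in agent.hi_tid and later used by
* find_mad_agent() (which matches the high 32 bits of the TID) to route
* response MADs back to the agent that issued the request.
*/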
68 static struct list_head ib_mad_port_list;
71 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
73 /* Forward declarations */
74 static int method_in_use(struct ib_mad_mgmt_method_table **method,
75 struct ib_mad_reg_req *mad_reg_req);
76 static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
77 static struct ib_mad_agent_private *find_mad_agent(
78 struct ib_mad_port_private *port_priv,
79 const struct ib_mad_hdr *mad);
80 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
81 struct ib_mad_private *mad);
82 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
83 static void timeout_sends(struct work_struct *work);
84 static void local_completions(struct work_struct *work);
85 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
86 struct ib_mad_agent_private *agent_priv,
88 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
89 struct ib_mad_agent_private *agent_priv);
90 static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
92 static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
95 * Returns an ib_mad_port_private structure or NULL for a device/port
96 * Assumes ib_mad_port_list_lock is held
98 static inline struct ib_mad_port_private *
99 __ib_get_mad_port(struct ib_device *device, int port_num)
101 struct ib_mad_port_private *entry;
103 list_for_each_entry(entry, &ib_mad_port_list, port_list) {
104 if (entry->device == device && entry->port_num == port_num)
111 * Wrapper function to return an ib_mad_port_private structure or NULL
112 * for a device/port, taking ib_mad_port_list_lock itself
114 static inline struct ib_mad_port_private *
115 ib_get_mad_port(struct ib_device *device, int port_num)
117 struct ib_mad_port_private *entry;
120 spin_lock_irqsave(&ib_mad_port_list_lock, flags);
121 entry = __ib_get_mad_port(device, port_num);
122 spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
127 static inline u8 convert_mgmt_class(u8 mgmt_class)
129 /* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
130 return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
134 static int get_spl_qp_index(enum ib_qp_type qp_type)
147 static int vendor_class_index(u8 mgmt_class)
149 return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
152 static int is_vendor_class(u8 mgmt_class)
154 if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
155 (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
160 static int is_vendor_oui(char *oui)
162 if (oui[0] || oui[1] || oui[2])
167 static int is_vendor_method_in_use(
168 struct ib_mad_mgmt_vendor_class *vendor_class,
169 struct ib_mad_reg_req *mad_reg_req)
171 struct ib_mad_mgmt_method_table *method;
174 for (i = 0; i < MAX_MGMT_OUI; i++) {
175 if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
176 method = vendor_class->method_table[i];
178 if (method_in_use(&method, mad_reg_req))
188 int ib_response_mad(const struct ib_mad_hdr *hdr)
190 return ((hdr->method & IB_MGMT_METHOD_RESP) ||
191 (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
192 ((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
193 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
195 EXPORT_SYMBOL(ib_response_mad);
198 * ib_register_mad_agent - Register to send/receive MADs
200 * Context: Process context.
202 struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
204 enum ib_qp_type qp_type,
205 struct ib_mad_reg_req *mad_reg_req,
207 ib_mad_send_handler send_handler,
208 ib_mad_recv_handler recv_handler,
210 u32 registration_flags)
212 struct ib_mad_port_private *port_priv;
213 struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
214 struct ib_mad_agent_private *mad_agent_priv;
215 struct ib_mad_reg_req *reg_req = NULL;
216 struct ib_mad_mgmt_class_table *class;
217 struct ib_mad_mgmt_vendor_class_table *vendor;
218 struct ib_mad_mgmt_vendor_class *vendor_class;
219 struct ib_mad_mgmt_method_table *method;
221 u8 mgmt_class, vclass;
223 /* Validate parameters */
224 qpn = get_spl_qp_index(qp_type);
226 dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
231 if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
232 dev_dbg_ratelimited(&device->dev,
233 "%s: invalid RMPP Version %u\n",
234 __func__, rmpp_version);
238 /* Validate MAD registration request if supplied */
240 if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
241 dev_dbg_ratelimited(&device->dev,
242 "%s: invalid Class Version %u\n",
244 mad_reg_req->mgmt_class_version);
248 dev_dbg_ratelimited(&device->dev,
249 "%s: no recv_handler\n", __func__);
252 if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
254 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
255 * one in this range currently allowed
257 if (mad_reg_req->mgmt_class !=
258 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
259 dev_dbg_ratelimited(&device->dev,
260 "%s: Invalid Mgmt Class 0x%x\n",
261 __func__, mad_reg_req->mgmt_class);
264 } else if (mad_reg_req->mgmt_class == 0) {
266 * Class 0 is reserved in IBA and is used for
267 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
269 dev_dbg_ratelimited(&device->dev,
270 "%s: Invalid Mgmt Class 0\n",
273 } else if (is_vendor_class(mad_reg_req->mgmt_class)) {
275 * If class is in "new" vendor range,
276 * ensure supplied OUI is not zero
278 if (!is_vendor_oui(mad_reg_req->oui)) {
279 dev_dbg_ratelimited(&device->dev,
280 "%s: No OUI specified for class 0x%x\n",
282 mad_reg_req->mgmt_class);
286 /* Make sure class supplied is consistent with RMPP */
287 if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
289 dev_dbg_ratelimited(&device->dev,
290 "%s: RMPP version for non-RMPP class 0x%x\n",
291 __func__, mad_reg_req->mgmt_class);
296 /* Make sure class supplied is consistent with QP type */
297 if (qp_type == IB_QPT_SMI) {
298 if ((mad_reg_req->mgmt_class !=
299 IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
300 (mad_reg_req->mgmt_class !=
301 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
302 dev_dbg_ratelimited(&device->dev,
303 "%s: Invalid SM QP type: class 0x%x\n",
304 __func__, mad_reg_req->mgmt_class);
308 if ((mad_reg_req->mgmt_class ==
309 IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
310 (mad_reg_req->mgmt_class ==
311 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
312 dev_dbg_ratelimited(&device->dev,
313 "%s: Invalid GS QP type: class 0x%x\n",
314 __func__, mad_reg_req->mgmt_class);
319 /* No registration request supplied */
322 if (registration_flags & IB_MAD_USER_RMPP)
326 /* Validate device and port */
327 port_priv = ib_get_mad_port(device, port_num);
329 dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
331 ret = ERR_PTR(-ENODEV);
335 /* Verify the QP requested is supported. For example, Ethernet devices
336 * will not have QP0. */
338 if (!port_priv->qp_info[qpn].qp) {
339 dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
341 ret = ERR_PTR(-EPROTONOSUPPORT);
345 /* Allocate structures */
346 mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
347 if (!mad_agent_priv) {
348 ret = ERR_PTR(-ENOMEM);
353 reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
355 ret = ERR_PTR(-ENOMEM);
360 /* Now, fill in the various structures */
361 mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
362 mad_agent_priv->reg_req = reg_req;
363 mad_agent_priv->agent.rmpp_version = rmpp_version;
364 mad_agent_priv->agent.device = device;
365 mad_agent_priv->agent.recv_handler = recv_handler;
366 mad_agent_priv->agent.send_handler = send_handler;
367 mad_agent_priv->agent.context = context;
368 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
369 mad_agent_priv->agent.port_num = port_num;
370 mad_agent_priv->agent.flags = registration_flags;
371 spin_lock_init(&mad_agent_priv->lock);
372 INIT_LIST_HEAD(&mad_agent_priv->send_list);
373 INIT_LIST_HEAD(&mad_agent_priv->wait_list);
374 INIT_LIST_HEAD(&mad_agent_priv->done_list);
375 INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
376 INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
377 INIT_LIST_HEAD(&mad_agent_priv->local_list);
378 INIT_WORK(&mad_agent_priv->local_work, local_completions);
379 atomic_set(&mad_agent_priv->refcount, 1);
380 init_completion(&mad_agent_priv->comp);
382 ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
388 idr_preload(GFP_KERNEL);
389 idr_lock(&ib_mad_clients);
390 ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
391 AGENT_ID_LIMIT, GFP_ATOMIC);
392 idr_unlock(&ib_mad_clients);
399 mad_agent_priv->agent.hi_tid = ret2;
402 * Make sure the MAD registration (if supplied)
403 * does not overlap with any existing registrations
405 spin_lock_irq(&port_priv->reg_lock);
407 mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
408 if (!is_vendor_class(mgmt_class)) {
409 class = port_priv->version[mad_reg_req->
410 mgmt_class_version].class;
412 method = class->method_table[mgmt_class];
414 if (method_in_use(&method,
419 ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
422 /* "New" vendor class range */
423 vendor = port_priv->version[mad_reg_req->
424 mgmt_class_version].vendor;
426 vclass = vendor_class_index(mgmt_class);
427 vendor_class = vendor->vendor_class[vclass];
429 if (is_vendor_method_in_use(
435 ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
442 spin_unlock_irq(&port_priv->reg_lock);
444 return &mad_agent_priv->agent;
446 spin_unlock_irq(&port_priv->reg_lock);
447 idr_lock(&ib_mad_clients);
448 idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
449 idr_unlock(&ib_mad_clients);
451 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
455 kfree(mad_agent_priv);
459 EXPORT_SYMBOL(ib_register_mad_agent);
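/*
* Illustrative registration sketch (not taken from this file; the handler
* names, context pointer and port number are placeholders, and error
* handling is trimmed). It shows a GSI client registering to handle
* SubnAdmGet MADs:
*
*	struct ib_mad_reg_req req = {
*		.mgmt_class = IB_MGMT_CLASS_SUBN_ADM,
*		.mgmt_class_version = 2,
*	};
*	struct ib_mad_agent *agent;
*
*	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
*	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI, &req, 0,
*				      my_send_handler, my_recv_handler,
*				      my_context, 0);
*	if (IS_ERR(agent))
*		return PTR_ERR(agent);
*/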
461 static inline int is_snooping_sends(int mad_snoop_flags)
463 return (mad_snoop_flags &
464 (/*IB_MAD_SNOOP_POSTED_SENDS |
465 IB_MAD_SNOOP_RMPP_SENDS |*/
466 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
467 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
470 static inline int is_snooping_recvs(int mad_snoop_flags)
472 return (mad_snoop_flags &
473 (IB_MAD_SNOOP_RECVS /*|
474 IB_MAD_SNOOP_RMPP_RECVS*/));
477 static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
478 struct ib_mad_snoop_private *mad_snoop_priv)
480 struct ib_mad_snoop_private **new_snoop_table;
484 spin_lock_irqsave(&qp_info->snoop_lock, flags);
485 /* Check for empty slot in array. */
486 for (i = 0; i < qp_info->snoop_table_size; i++)
487 if (!qp_info->snoop_table[i])
490 if (i == qp_info->snoop_table_size) {
492 new_snoop_table = krealloc(qp_info->snoop_table,
493 sizeof mad_snoop_priv *
494 (qp_info->snoop_table_size + 1),
496 if (!new_snoop_table) {
501 qp_info->snoop_table = new_snoop_table;
502 qp_info->snoop_table_size++;
504 qp_info->snoop_table[i] = mad_snoop_priv;
505 atomic_inc(&qp_info->snoop_count);
507 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
511 struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
513 enum ib_qp_type qp_type,
515 ib_mad_snoop_handler snoop_handler,
516 ib_mad_recv_handler recv_handler,
519 struct ib_mad_port_private *port_priv;
520 struct ib_mad_agent *ret;
521 struct ib_mad_snoop_private *mad_snoop_priv;
525 /* Validate parameters */
526 if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
527 (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
528 ret = ERR_PTR(-EINVAL);
531 qpn = get_spl_qp_index(qp_type);
533 ret = ERR_PTR(-EINVAL);
536 port_priv = ib_get_mad_port(device, port_num);
538 ret = ERR_PTR(-ENODEV);
541 /* Allocate structures */
542 mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
543 if (!mad_snoop_priv) {
544 ret = ERR_PTR(-ENOMEM);
548 /* Now, fill in the various structures */
549 mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
550 mad_snoop_priv->agent.device = device;
551 mad_snoop_priv->agent.recv_handler = recv_handler;
552 mad_snoop_priv->agent.snoop_handler = snoop_handler;
553 mad_snoop_priv->agent.context = context;
554 mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
555 mad_snoop_priv->agent.port_num = port_num;
556 mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
557 init_completion(&mad_snoop_priv->comp);
559 err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type);
565 mad_snoop_priv->snoop_index = register_snoop_agent(
566 &port_priv->qp_info[qpn],
568 if (mad_snoop_priv->snoop_index < 0) {
569 ret = ERR_PTR(mad_snoop_priv->snoop_index);
573 atomic_set(&mad_snoop_priv->refcount, 1);
574 return &mad_snoop_priv->agent;
576 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
578 kfree(mad_snoop_priv);
582 EXPORT_SYMBOL(ib_register_mad_snoop);
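/*
* Snoop agents are passive observers: they are parked in the per-QP
* snoop_table and, whenever snoop_count is non-zero, snoop_send() and
* snoop_recv() below replay send completions and received MADs to their
* handlers without consuming them.
*/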
584 static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
586 if (atomic_dec_and_test(&mad_agent_priv->refcount))
587 complete(&mad_agent_priv->comp);
590 static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
592 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
593 complete(&mad_snoop_priv->comp);
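/*
* Agent teardown relies on the refcount/completion pair set up at
* registration time: unregister_mad_agent() drops its own reference and then
* waits on ->comp, which deref_mad_agent() completes only when the last
* outstanding send or receive reference is released.
*/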
596 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
598 struct ib_mad_port_private *port_priv;
600 /* Note that we could still be handling received MADs */
603 * Canceling all sends results in dropping received response
604 * MADs, preventing us from queuing additional work
606 cancel_mads(mad_agent_priv);
607 port_priv = mad_agent_priv->qp_info->port_priv;
608 cancel_delayed_work(&mad_agent_priv->timed_work);
610 spin_lock_irq(&port_priv->reg_lock);
611 remove_mad_reg_req(mad_agent_priv);
612 spin_unlock_irq(&port_priv->reg_lock);
613 idr_lock(&ib_mad_clients);
614 idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
615 idr_unlock(&ib_mad_clients);
617 flush_workqueue(port_priv->wq);
619 deref_mad_agent(mad_agent_priv);
620 wait_for_completion(&mad_agent_priv->comp);
621 ib_cancel_rmpp_recvs(mad_agent_priv);
623 ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
625 kfree(mad_agent_priv->reg_req);
626 kfree_rcu(mad_agent_priv, rcu);
629 static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
631 struct ib_mad_qp_info *qp_info;
634 qp_info = mad_snoop_priv->qp_info;
635 spin_lock_irqsave(&qp_info->snoop_lock, flags);
636 qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
637 atomic_dec(&qp_info->snoop_count);
638 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
640 deref_snoop_agent(mad_snoop_priv);
641 wait_for_completion(&mad_snoop_priv->comp);
643 ib_mad_agent_security_cleanup(&mad_snoop_priv->agent);
645 kfree(mad_snoop_priv);
649 * ib_unregister_mad_agent - Unregisters a client from using MAD services
651 * Context: Process context.
653 void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
655 struct ib_mad_agent_private *mad_agent_priv;
656 struct ib_mad_snoop_private *mad_snoop_priv;
658 /* If the TID is zero, the agent can only snoop. */
659 if (mad_agent->hi_tid) {
660 mad_agent_priv = container_of(mad_agent,
661 struct ib_mad_agent_private,
663 unregister_mad_agent(mad_agent_priv);
665 mad_snoop_priv = container_of(mad_agent,
666 struct ib_mad_snoop_private,
668 unregister_mad_snoop(mad_snoop_priv);
671 EXPORT_SYMBOL(ib_unregister_mad_agent);
673 static void dequeue_mad(struct ib_mad_list_head *mad_list)
675 struct ib_mad_queue *mad_queue;
678 mad_queue = mad_list->mad_queue;
679 spin_lock_irqsave(&mad_queue->lock, flags);
680 list_del(&mad_list->list);
682 spin_unlock_irqrestore(&mad_queue->lock, flags);
685 static void snoop_send(struct ib_mad_qp_info *qp_info,
686 struct ib_mad_send_buf *send_buf,
687 struct ib_mad_send_wc *mad_send_wc,
690 struct ib_mad_snoop_private *mad_snoop_priv;
694 spin_lock_irqsave(&qp_info->snoop_lock, flags);
695 for (i = 0; i < qp_info->snoop_table_size; i++) {
696 mad_snoop_priv = qp_info->snoop_table[i];
697 if (!mad_snoop_priv ||
698 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
701 atomic_inc(&mad_snoop_priv->refcount);
702 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
703 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
704 send_buf, mad_send_wc);
705 deref_snoop_agent(mad_snoop_priv);
706 spin_lock_irqsave(&qp_info->snoop_lock, flags);
708 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
711 static void snoop_recv(struct ib_mad_qp_info *qp_info,
712 struct ib_mad_recv_wc *mad_recv_wc,
715 struct ib_mad_snoop_private *mad_snoop_priv;
719 spin_lock_irqsave(&qp_info->snoop_lock, flags);
720 for (i = 0; i < qp_info->snoop_table_size; i++) {
721 mad_snoop_priv = qp_info->snoop_table[i];
722 if (!mad_snoop_priv ||
723 !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
726 atomic_inc(&mad_snoop_priv->refcount);
727 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
728 mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
730 deref_snoop_agent(mad_snoop_priv);
731 spin_lock_irqsave(&qp_info->snoop_lock, flags);
733 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
736 static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
737 u16 pkey_index, u8 port_num, struct ib_wc *wc)
739 memset(wc, 0, sizeof *wc);
741 wc->status = IB_WC_SUCCESS;
742 wc->opcode = IB_WC_RECV;
743 wc->pkey_index = pkey_index;
744 wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
749 wc->dlid_path_bits = 0;
750 wc->port_num = port_num;
753 static size_t mad_priv_size(const struct ib_mad_private *mp)
755 return sizeof(struct ib_mad_private) + mp->mad_size;
758 static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
760 size_t size = sizeof(struct ib_mad_private) + mad_size;
761 struct ib_mad_private *ret = kzalloc(size, flags);
764 ret->mad_size = mad_size;
769 static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
771 return rdma_max_mad_size(port_priv->device, port_priv->port_num);
774 static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
776 return sizeof(struct ib_grh) + mp->mad_size;
780 * Return 0 if SMP is to be sent
781 * Return 1 if SMP was consumed locally (whether or not solicited)
782 * Return < 0 if error
784 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
785 struct ib_mad_send_wr_private *mad_send_wr)
788 struct ib_smp *smp = mad_send_wr->send_buf.mad;
789 struct opa_smp *opa_smp = (struct opa_smp *)smp;
791 struct ib_mad_local_private *local;
792 struct ib_mad_private *mad_priv;
793 struct ib_mad_port_private *port_priv;
794 struct ib_mad_agent_private *recv_mad_agent = NULL;
795 struct ib_device *device = mad_agent_priv->agent.device;
798 struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
799 size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
800 u16 out_mad_pkey_index = 0;
802 bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
803 mad_agent_priv->qp_info->port_priv->port_num);
805 if (rdma_cap_ib_switch(device) &&
806 smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
807 port_num = send_wr->port_num;
809 port_num = mad_agent_priv->agent.port_num;
812 * Directed route handling starts if the initial LID routed part of
813 * a request or the ending LID routed part of a response is empty.
814 * If we are at the start of the LID routed part, don't update the
815 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec.
817 if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
820 if ((opa_get_smp_direction(opa_smp)
821 ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
822 OPA_LID_PERMISSIVE &&
823 opa_smi_handle_dr_smp_send(opa_smp,
824 rdma_cap_ib_switch(device),
825 port_num) == IB_SMI_DISCARD) {
827 dev_err(&device->dev, "OPA Invalid directed route\n");
830 opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
831 if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
832 opa_drslid & 0xffff0000) {
834 dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
838 drslid = (u16)(opa_drslid & 0x0000ffff);
840 /* Check to post send on QP or process locally */
841 if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
842 opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
845 if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
847 smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
850 dev_err(&device->dev, "Invalid directed route\n");
853 drslid = be16_to_cpu(smp->dr_slid);
855 /* Check to post send on QP or process locally */
856 if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
857 smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
861 local = kmalloc(sizeof *local, GFP_ATOMIC);
866 local->mad_priv = NULL;
867 local->recv_mad_agent = NULL;
868 mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
875 build_smp_wc(mad_agent_priv->agent.qp,
876 send_wr->wr.wr_cqe, drslid,
878 send_wr->port_num, &mad_wc);
880 if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
881 mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
882 + mad_send_wr->send_buf.data_len
883 + sizeof(struct ib_grh);
886 /* No GRH for DR SMP */
887 ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
888 (const struct ib_mad_hdr *)smp, mad_size,
889 (struct ib_mad_hdr *)mad_priv->mad,
890 &mad_size, &out_mad_pkey_index);
893 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
894 if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
895 mad_agent_priv->agent.recv_handler) {
896 local->mad_priv = mad_priv;
897 local->recv_mad_agent = mad_agent_priv;
899 * Reference MAD agent until receive
900 * side of local completion handled
902 atomic_inc(&mad_agent_priv->refcount);
906 case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
909 case IB_MAD_RESULT_SUCCESS:
910 /* Treat like an incoming receive MAD */
911 port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
912 mad_agent_priv->agent.port_num);
914 memcpy(mad_priv->mad, smp, mad_priv->mad_size);
915 recv_mad_agent = find_mad_agent(port_priv,
916 (const struct ib_mad_hdr *)mad_priv->mad);
918 if (!port_priv || !recv_mad_agent) {
920 * No receiving agent so drop packet and
921 * generate send completion.
926 local->mad_priv = mad_priv;
927 local->recv_mad_agent = recv_mad_agent;
936 local->mad_send_wr = mad_send_wr;
938 local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
939 local->return_wc_byte_len = mad_size;
941 /* Reference MAD agent until send side of local completion handled */
942 atomic_inc(&mad_agent_priv->refcount);
943 /* Queue local completion to local list */
944 spin_lock_irqsave(&mad_agent_priv->lock, flags);
945 list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
946 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
947 queue_work(mad_agent_priv->qp_info->port_priv->wq,
948 &mad_agent_priv->local_work);
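/*
* RMPP payloads are carried in segments of (mad_size - hdr_len) bytes, so
* the final segment may need zero padding. Worked example, assuming a
* 256-byte IB MAD with a 56-byte SA header: seg_size = 200, and
* data_len = 300 gives pad = 200 - (300 % 200) = 100, while data_len = 400
* divides evenly and needs no padding.
*/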
954 static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
958 seg_size = mad_size - hdr_len;
959 if (data_len && seg_size) {
960 pad = seg_size - data_len % seg_size;
961 return pad == seg_size ? 0 : pad;
966 static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
968 struct ib_rmpp_segment *s, *t;
970 list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
976 static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
977 size_t mad_size, gfp_t gfp_mask)
979 struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
980 struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
981 struct ib_rmpp_segment *seg = NULL;
982 int left, seg_size, pad;
984 send_buf->seg_size = mad_size - send_buf->hdr_len;
985 send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
986 seg_size = send_buf->seg_size;
989 /* Allocate data segments. */
990 for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
991 seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
993 free_send_rmpp_list(send_wr);
996 seg->num = ++send_buf->seg_count;
997 list_add_tail(&seg->list, &send_wr->rmpp_list);
1000 /* Zero any padding */
1002 memset(seg->data + seg_size - pad, 0, pad);
1004 rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
1006 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
1007 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
1009 send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
1010 struct ib_rmpp_segment, list);
1011 send_wr->last_ack_seg = send_wr->cur_seg;
1015 int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
1017 return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
1019 EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
1021 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
1022 u32 remote_qpn, u16 pkey_index,
1024 int hdr_len, int data_len,
1028 struct ib_mad_agent_private *mad_agent_priv;
1029 struct ib_mad_send_wr_private *mad_send_wr;
1030 int pad, message_size, ret, size;
1035 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
1038 opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);
1040 if (opa && base_version == OPA_MGMT_BASE_VERSION)
1041 mad_size = sizeof(struct opa_mad);
1043 mad_size = sizeof(struct ib_mad);
1045 pad = get_pad_size(hdr_len, data_len, mad_size);
1046 message_size = hdr_len + data_len + pad;
1048 if (ib_mad_kernel_rmpp_agent(mad_agent)) {
1049 if (!rmpp_active && message_size > mad_size)
1050 return ERR_PTR(-EINVAL);
1052 if (rmpp_active || message_size > mad_size)
1053 return ERR_PTR(-EINVAL);
1055 size = rmpp_active ? hdr_len : mad_size;
1056 buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
1058 return ERR_PTR(-ENOMEM);
1060 mad_send_wr = buf + size;
1061 INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
1062 mad_send_wr->send_buf.mad = buf;
1063 mad_send_wr->send_buf.hdr_len = hdr_len;
1064 mad_send_wr->send_buf.data_len = data_len;
1065 mad_send_wr->pad = pad;
1067 mad_send_wr->mad_agent_priv = mad_agent_priv;
1068 mad_send_wr->sg_list[0].length = hdr_len;
1069 mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;
1071 /* OPA MADs don't have to be the full 2048 bytes */
1072 if (opa && base_version == OPA_MGMT_BASE_VERSION &&
1073 data_len < mad_size - hdr_len)
1074 mad_send_wr->sg_list[1].length = data_len;
1076 mad_send_wr->sg_list[1].length = mad_size - hdr_len;
1078 mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
1080 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1082 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1083 mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
1084 mad_send_wr->send_wr.wr.num_sge = 2;
1085 mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
1086 mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
1087 mad_send_wr->send_wr.remote_qpn = remote_qpn;
1088 mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
1089 mad_send_wr->send_wr.pkey_index = pkey_index;
1092 ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
1095 return ERR_PTR(ret);
1099 mad_send_wr->send_buf.mad_agent = mad_agent;
1100 atomic_inc(&mad_agent_priv->refcount);
1101 return &mad_send_wr->send_buf;
1103 EXPORT_SYMBOL(ib_create_send_mad);
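/*
* Illustrative send sketch (not taken from this file; "agent", "ah",
* remote_qpn and pkey_index are assumed to come from the caller, and the
* request payload is elided):
*
*	struct ib_mad_send_buf *msg;
*
*	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
*				 IB_MGMT_SA_HDR, IB_MGMT_SA_DATA, GFP_KERNEL,
*				 IB_MGMT_BASE_VERSION);
*	if (IS_ERR(msg))
*		return PTR_ERR(msg);
*	msg->ah = ah;
*	msg->timeout_ms = 1000;
*	msg->retries = 3;
*	... fill msg->mad with the request ...
*	if (ib_post_send_mad(msg, NULL))
*		ib_free_send_mad(msg);
*/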
1105 int ib_get_mad_data_offset(u8 mgmt_class)
1107 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
1108 return IB_MGMT_SA_HDR;
1109 else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1110 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1111 (mgmt_class == IB_MGMT_CLASS_BIS))
1112 return IB_MGMT_DEVICE_HDR;
1113 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1114 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
1115 return IB_MGMT_VENDOR_HDR;
1117 return IB_MGMT_MAD_HDR;
1119 EXPORT_SYMBOL(ib_get_mad_data_offset);
1121 int ib_is_mad_class_rmpp(u8 mgmt_class)
1123 if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
1124 (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
1125 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
1126 (mgmt_class == IB_MGMT_CLASS_BIS) ||
1127 ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
1128 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
1132 EXPORT_SYMBOL(ib_is_mad_class_rmpp);
1134 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
1136 struct ib_mad_send_wr_private *mad_send_wr;
1137 struct list_head *list;
1139 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1141 list = &mad_send_wr->cur_seg->list;
1143 if (mad_send_wr->cur_seg->num < seg_num) {
1144 list_for_each_entry(mad_send_wr->cur_seg, list, list)
1145 if (mad_send_wr->cur_seg->num == seg_num)
1147 } else if (mad_send_wr->cur_seg->num > seg_num) {
1148 list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
1149 if (mad_send_wr->cur_seg->num == seg_num)
1152 return mad_send_wr->cur_seg->data;
1154 EXPORT_SYMBOL(ib_get_rmpp_segment);
1156 static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
1158 if (mad_send_wr->send_buf.seg_count)
1159 return ib_get_rmpp_segment(&mad_send_wr->send_buf,
1160 mad_send_wr->seg_num);
1162 return mad_send_wr->send_buf.mad +
1163 mad_send_wr->send_buf.hdr_len;
1166 void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
1168 struct ib_mad_agent_private *mad_agent_priv;
1169 struct ib_mad_send_wr_private *mad_send_wr;
1171 mad_agent_priv = container_of(send_buf->mad_agent,
1172 struct ib_mad_agent_private, agent);
1173 mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
1176 free_send_rmpp_list(mad_send_wr);
1177 kfree(send_buf->mad);
1178 deref_mad_agent(mad_agent_priv);
1180 EXPORT_SYMBOL(ib_free_send_mad);
1182 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
1184 struct ib_mad_qp_info *qp_info;
1185 struct list_head *list;
1186 struct ib_mad_agent *mad_agent;
1188 unsigned long flags;
1191 /* Set WR ID to find mad_send_wr upon completion */
1192 qp_info = mad_send_wr->mad_agent_priv->qp_info;
1193 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
1194 mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
1195 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
1197 mad_agent = mad_send_wr->send_buf.mad_agent;
1198 sge = mad_send_wr->sg_list;
1199 sge[0].addr = ib_dma_map_single(mad_agent->device,
1200 mad_send_wr->send_buf.mad,
1203 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
1206 mad_send_wr->header_mapping = sge[0].addr;
1208 sge[1].addr = ib_dma_map_single(mad_agent->device,
1209 ib_get_payload(mad_send_wr),
1212 if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
1213 ib_dma_unmap_single(mad_agent->device,
1214 mad_send_wr->header_mapping,
1215 sge[0].length, DMA_TO_DEVICE);
1218 mad_send_wr->payload_mapping = sge[1].addr;
1220 spin_lock_irqsave(&qp_info->send_queue.lock, flags);
1221 if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
1222 ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
1224 list = &qp_info->send_queue.list;
1227 list = &qp_info->overflow_list;
1231 qp_info->send_queue.count++;
1232 list_add_tail(&mad_send_wr->mad_list.list, list);
1234 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
1236 ib_dma_unmap_single(mad_agent->device,
1237 mad_send_wr->header_mapping,
1238 sge[0].length, DMA_TO_DEVICE);
1239 ib_dma_unmap_single(mad_agent->device,
1240 mad_send_wr->payload_mapping,
1241 sge[1].length, DMA_TO_DEVICE);
1247 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1248 * with the registered client
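*
* For each buffer in the chain this enforces the pkey security check, gives
* directed-route SMPs a chance to be consumed locally via
* handle_outgoing_dr_smp(), takes a reference on the agent plus an extra
* work-request reference when a timeout (i.e. a response) is expected, and
* then hands the request to ib_send_rmpp_mad() or ib_send_mad().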
1250 int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1251 struct ib_mad_send_buf **bad_send_buf)
1253 struct ib_mad_agent_private *mad_agent_priv;
1254 struct ib_mad_send_buf *next_send_buf;
1255 struct ib_mad_send_wr_private *mad_send_wr;
1256 unsigned long flags;
1259 /* Walk the list of send buffers and post each send work request */
1260 for (; send_buf; send_buf = next_send_buf) {
1261 mad_send_wr = container_of(send_buf,
1262 struct ib_mad_send_wr_private,
1264 mad_agent_priv = mad_send_wr->mad_agent_priv;
1266 ret = ib_mad_enforce_security(mad_agent_priv,
1267 mad_send_wr->send_wr.pkey_index);
1271 if (!send_buf->mad_agent->send_handler ||
1272 (send_buf->timeout_ms &&
1273 !send_buf->mad_agent->recv_handler)) {
1278 if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1279 if (mad_agent_priv->agent.rmpp_version) {
1286 * Save pointer to next work request to post in case the
1287 * current one completes, and the user modifies the work
1288 * request associated with the completion
1290 next_send_buf = send_buf->next;
1291 mad_send_wr->send_wr.ah = send_buf->ah;
1293 if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1294 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1295 ret = handle_outgoing_dr_smp(mad_agent_priv,
1297 if (ret < 0) /* error */
1299 else if (ret == 1) /* locally consumed */
1303 mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1304 /* Timeout will be updated after send completes */
1305 mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1306 mad_send_wr->max_retries = send_buf->retries;
1307 mad_send_wr->retries_left = send_buf->retries;
1308 send_buf->retries = 0;
1309 /* Reference for work request to QP + response */
1310 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1311 mad_send_wr->status = IB_WC_SUCCESS;
1313 /* Reference MAD agent until send completes */
1314 atomic_inc(&mad_agent_priv->refcount);
1315 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1316 list_add_tail(&mad_send_wr->agent_list,
1317 &mad_agent_priv->send_list);
1318 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1320 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1321 ret = ib_send_rmpp_mad(mad_send_wr);
1322 if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1323 ret = ib_send_mad(mad_send_wr);
1325 ret = ib_send_mad(mad_send_wr);
1327 /* Fail send request */
1328 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1329 list_del(&mad_send_wr->agent_list);
1330 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1331 atomic_dec(&mad_agent_priv->refcount);
1338 *bad_send_buf = send_buf;
1341 EXPORT_SYMBOL(ib_post_send_mad);
1344 * ib_free_recv_mad - Returns data buffers used to receive
1345 * a MAD back to the access layer
1347 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
1349 struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
1350 struct ib_mad_private_header *mad_priv_hdr;
1351 struct ib_mad_private *priv;
1352 struct list_head free_list;
1354 INIT_LIST_HEAD(&free_list);
1355 list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
1357 list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
1359 mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
1361 mad_priv_hdr = container_of(mad_recv_wc,
1362 struct ib_mad_private_header,
1364 priv = container_of(mad_priv_hdr, struct ib_mad_private,
1369 EXPORT_SYMBOL(ib_free_recv_mad);
1371 struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
1373 ib_mad_send_handler send_handler,
1374 ib_mad_recv_handler recv_handler,
1377 return ERR_PTR(-EINVAL); /* XXX: for now */
1379 EXPORT_SYMBOL(ib_redirect_mad_qp);
1381 int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
1384 dev_err(&mad_agent->device->dev,
1385 "ib_process_mad_wc() not implemented yet\n");
1388 EXPORT_SYMBOL(ib_process_mad_wc);
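/*
* The registration bookkeeping below is a per-port, per-class-version tree:
* a management class table points to a method table per class, whose entries
* map each method to the owning agent; vendor classes in range 2 add one
* extra level keyed by OUI.
*/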
1390 static int method_in_use(struct ib_mad_mgmt_method_table **method,
1391 struct ib_mad_reg_req *mad_reg_req)
1395 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
1396 if ((*method)->agent[i]) {
1397 pr_err("Method %d already in use\n", i);
1404 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
1406 /* Allocate management method table */
1407 *method = kzalloc(sizeof **method, GFP_ATOMIC);
1408 return (*method) ? 0 : (-ENOMEM);
1412 * Check to see if there are any methods still in use
1414 static int check_method_table(struct ib_mad_mgmt_method_table *method)
1418 for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
1419 if (method->agent[i])
1425 * Check to see if there are any method tables for this class still in use
1427 static int check_class_table(struct ib_mad_mgmt_class_table *class)
1431 for (i = 0; i < MAX_MGMT_CLASS; i++)
1432 if (class->method_table[i])
1437 static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
1441 for (i = 0; i < MAX_MGMT_OUI; i++)
1442 if (vendor_class->method_table[i])
1447 static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
1452 for (i = 0; i < MAX_MGMT_OUI; i++)
1453 /* Is there a matching OUI for this vendor class? */
1454 if (!memcmp(vendor_class->oui[i], oui, 3))
1460 static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
1464 for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
1465 if (vendor->vendor_class[i])
1471 static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
1472 struct ib_mad_agent_private *agent)
1476 /* Remove any methods for this mad agent */
1477 for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
1478 if (method->agent[i] == agent) {
1479 method->agent[i] = NULL;
1484 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1485 struct ib_mad_agent_private *agent_priv,
1488 struct ib_mad_port_private *port_priv;
1489 struct ib_mad_mgmt_class_table **class;
1490 struct ib_mad_mgmt_method_table **method;
1493 port_priv = agent_priv->qp_info->port_priv;
1494 class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
1496 /* Allocate management class table for "new" class version */
1497 *class = kzalloc(sizeof **class, GFP_ATOMIC);
1503 /* Allocate method table for this management class */
1504 method = &(*class)->method_table[mgmt_class];
1505 if ((ret = allocate_method_table(method)))
1508 method = &(*class)->method_table[mgmt_class];
1510 /* Allocate method table for this management class */
1511 if ((ret = allocate_method_table(method)))
1516 /* Now, make sure methods are not already in use */
1517 if (method_in_use(method, mad_reg_req))
1520 /* Finally, add in methods being registered */
1521 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1522 (*method)->agent[i] = agent_priv;
1527 /* Remove any methods for this mad agent */
1528 remove_methods_mad_agent(*method, agent_priv);
1529 /* Now, check to see if there are any methods in use */
1530 if (!check_method_table(*method)) {
1531 /* If not, release management method table */
1544 static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
1545 struct ib_mad_agent_private *agent_priv)
1547 struct ib_mad_port_private *port_priv;
1548 struct ib_mad_mgmt_vendor_class_table **vendor_table;
1549 struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
1550 struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
1551 struct ib_mad_mgmt_method_table **method;
1552 int i, ret = -ENOMEM;
1555 /* "New" vendor (with OUI) class */
1556 vclass = vendor_class_index(mad_reg_req->mgmt_class);
1557 port_priv = agent_priv->qp_info->port_priv;
1558 vendor_table = &port_priv->version[
1559 mad_reg_req->mgmt_class_version].vendor;
1560 if (!*vendor_table) {
1561 /* Allocate mgmt vendor class table for "new" class version */
1562 vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
1566 *vendor_table = vendor;
1568 if (!(*vendor_table)->vendor_class[vclass]) {
1569 /* Allocate table for this management vendor class */
1570 vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
1574 (*vendor_table)->vendor_class[vclass] = vendor_class;
1576 for (i = 0; i < MAX_MGMT_OUI; i++) {
1577 /* Is there a matching OUI for this vendor class? */
1578 if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
1579 mad_reg_req->oui, 3)) {
1580 method = &(*vendor_table)->vendor_class[
1581 vclass]->method_table[i];
1587 for (i = 0; i < MAX_MGMT_OUI; i++) {
1588 /* Is an OUI slot available? */
1589 if (!is_vendor_oui((*vendor_table)->vendor_class[
1591 method = &(*vendor_table)->vendor_class[
1592 vclass]->method_table[i];
1593 /* Allocate method table for this OUI */
1595 ret = allocate_method_table(method);
1599 memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
1600 mad_reg_req->oui, 3);
1604 dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
1608 /* Now, make sure methods are not already in use */
1609 if (method_in_use(method, mad_reg_req))
1612 /* Finally, add in methods being registered */
1613 for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
1614 (*method)->agent[i] = agent_priv;
1619 /* Remove any methods for this mad agent */
1620 remove_methods_mad_agent(*method, agent_priv);
1621 /* Now, check to see if there are any methods in use */
1622 if (!check_method_table(*method)) {
1623 /* If not, release management method table */
1630 (*vendor_table)->vendor_class[vclass] = NULL;
1631 kfree(vendor_class);
1635 *vendor_table = NULL;
1642 static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
1644 struct ib_mad_port_private *port_priv;
1645 struct ib_mad_mgmt_class_table *class;
1646 struct ib_mad_mgmt_method_table *method;
1647 struct ib_mad_mgmt_vendor_class_table *vendor;
1648 struct ib_mad_mgmt_vendor_class *vendor_class;
1653 * Was a MAD registration request supplied
1654 * with the original registration?
1656 if (!agent_priv->reg_req) {
1660 port_priv = agent_priv->qp_info->port_priv;
1661 mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
1662 class = port_priv->version[
1663 agent_priv->reg_req->mgmt_class_version].class;
1667 method = class->method_table[mgmt_class];
1669 /* Remove any methods for this mad agent */
1670 remove_methods_mad_agent(method, agent_priv);
1671 /* Now, check to see if there are any methods still in use */
1672 if (!check_method_table(method)) {
1673 /* If not, release management method table */
1675 class->method_table[mgmt_class] = NULL;
1676 /* Any management classes left ? */
1677 if (!check_class_table(class)) {
1678 /* If not, release management class table */
1681 agent_priv->reg_req->
1682 mgmt_class_version].class = NULL;
1688 if (!is_vendor_class(mgmt_class))
1691 /* normalize mgmt_class to vendor range 2 */
1692 mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
1693 vendor = port_priv->version[
1694 agent_priv->reg_req->mgmt_class_version].vendor;
1699 vendor_class = vendor->vendor_class[mgmt_class];
1701 index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
1704 method = vendor_class->method_table[index];
1706 /* Remove any methods for this mad agent */
1707 remove_methods_mad_agent(method, agent_priv);
1709 * Now, check to see if there are
1710 * any methods still in use
1712 if (!check_method_table(method)) {
1713 /* If not, release management method table */
1715 vendor_class->method_table[index] = NULL;
1716 memset(vendor_class->oui[index], 0, 3);
1717 /* Any OUIs left ? */
1718 if (!check_vendor_class(vendor_class)) {
1719 /* If not, release vendor class table */
1720 kfree(vendor_class);
1721 vendor->vendor_class[mgmt_class] = NULL;
1722 /* Any other vendor classes left ? */
1723 if (!check_vendor_table(vendor)) {
1726 agent_priv->reg_req->
1727 mgmt_class_version].
1739 static struct ib_mad_agent_private *
1740 find_mad_agent(struct ib_mad_port_private *port_priv,
1741 const struct ib_mad_hdr *mad_hdr)
1743 struct ib_mad_agent_private *mad_agent = NULL;
1744 unsigned long flags;
1746 if (ib_response_mad(mad_hdr)) {
1750 * Routing is based on high 32 bits of transaction ID
1753 hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
1755 mad_agent = idr_find(&ib_mad_clients, hi_tid);
1756 if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
1760 struct ib_mad_mgmt_class_table *class;
1761 struct ib_mad_mgmt_method_table *method;
1762 struct ib_mad_mgmt_vendor_class_table *vendor;
1763 struct ib_mad_mgmt_vendor_class *vendor_class;
1764 const struct ib_vendor_mad *vendor_mad;
1767 spin_lock_irqsave(&port_priv->reg_lock, flags);
1769 * Routing is based on version, class, and method
1770 * For "newer" vendor MADs, also based on OUI
1772 if (mad_hdr->class_version >= MAX_MGMT_VERSION)
1774 if (!is_vendor_class(mad_hdr->mgmt_class)) {
1775 class = port_priv->version[
1776 mad_hdr->class_version].class;
1779 if (convert_mgmt_class(mad_hdr->mgmt_class) >=
1780 ARRAY_SIZE(class->method_table))
1782 method = class->method_table[convert_mgmt_class(
1783 mad_hdr->mgmt_class)];
1785 mad_agent = method->agent[mad_hdr->method &
1786 ~IB_MGMT_METHOD_RESP];
1788 vendor = port_priv->version[
1789 mad_hdr->class_version].vendor;
1792 vendor_class = vendor->vendor_class[vendor_class_index(
1793 mad_hdr->mgmt_class)];
1796 /* Find matching OUI */
1797 vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
1798 index = find_vendor_oui(vendor_class, vendor_mad->oui);
1801 method = vendor_class->method_table[index];
1803 mad_agent = method->agent[mad_hdr->method &
1804 ~IB_MGMT_METHOD_RESP];
1808 atomic_inc(&mad_agent->refcount);
1810 spin_unlock_irqrestore(&port_priv->reg_lock, flags);
1813 if (mad_agent && !mad_agent->agent.recv_handler) {
1814 dev_notice(&port_priv->device->dev,
1815 "No receive handler for client %p on port %d\n",
1816 &mad_agent->agent, port_priv->port_num);
1817 deref_mad_agent(mad_agent);
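/*
* Sanity-check an incoming MAD against the QP it arrived on: the base
* version must be understood, SMP classes are only accepted on QP0, most CM
* attributes must use the Send method, and GSI classes must not arrive on
* QP0.
*/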
1824 static int validate_mad(const struct ib_mad_hdr *mad_hdr,
1825 const struct ib_mad_qp_info *qp_info,
1829 u32 qp_num = qp_info->qp->qp_num;
1831 /* Make sure MAD base version is understood */
1832 if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
1833 (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
1834 pr_err("MAD received with unsupported base version %d %s\n",
1835 mad_hdr->base_version, opa ? "(opa)" : "");
1839 /* Filter SMI packets sent to other than QP0 */
1840 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
1841 (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
1845 /* CM attributes other than ClassPortInfo only use Send method */
1846 if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
1847 (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
1848 (mad_hdr->method != IB_MGMT_METHOD_SEND))
1850 /* Filter GSI packets sent to QP0 */
1859 static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
1860 const struct ib_mad_hdr *mad_hdr)
1862 struct ib_rmpp_mad *rmpp_mad;
1864 rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
1865 return !mad_agent_priv->agent.rmpp_version ||
1866 !ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
1867 !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
1868 IB_MGMT_RMPP_FLAG_ACTIVE) ||
1869 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1872 static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
1873 const struct ib_mad_recv_wc *rwc)
1875 return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
1876 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1879 static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
1880 const struct ib_mad_send_wr_private *wr,
1881 const struct ib_mad_recv_wc *rwc )
1883 struct rdma_ah_attr attr;
1884 u8 send_resp, rcv_resp;
1886 struct ib_device *device = mad_agent_priv->agent.device;
1887 u8 port_num = mad_agent_priv->agent.port_num;
1891 send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
1892 rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);
1894 if (send_resp == rcv_resp)
1895 /* both requests, or both responses. GIDs different */
1898 if (rdma_query_ah(wr->send_buf.ah, &attr))
1899 /* Assume not equal, to avoid false positives. */
1902 has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
1903 if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
1904 /* one has GID, other does not. Assume different */
1907 if (!send_resp && rcv_resp) {
1908 /* is request/response. */
1910 if (ib_get_cached_lmc(device, port_num, &lmc))
1912 return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
1913 rwc->wc->dlid_path_bits) &
1916 const struct ib_global_route *grh =
1917 rdma_ah_read_grh(&attr);
1919 if (rdma_query_gid(device, port_num,
1920 grh->sgid_index, &sgid))
1922 return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
1928 return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
1930 return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
1931 rwc->recv_buf.grh->sgid.raw,
1935 static inline int is_direct(u8 class)
1937 return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
1940 struct ib_mad_send_wr_private*
1941 ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
1942 const struct ib_mad_recv_wc *wc)
1944 struct ib_mad_send_wr_private *wr;
1945 const struct ib_mad_hdr *mad_hdr;
1947 mad_hdr = &wc->recv_buf.mad->mad_hdr;
1949 list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
1950 if ((wr->tid == mad_hdr->tid) &&
1951 rcv_has_same_class(wr, wc) &&
1953 * Don't check GID for direct routed MADs.
1954 * These might have permissive LIDs.
1956 (is_direct(mad_hdr->mgmt_class) ||
1957 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1958 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1962 * It's possible to receive the response before we've
1963 * been notified that the send has completed
1965 list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
1966 if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
1967 wr->tid == mad_hdr->tid &&
1969 rcv_has_same_class(wr, wc) &&
1971 * Don't check GID for direct routed MADs.
1972 * These might have permissive LIDs.
1974 (is_direct(mad_hdr->mgmt_class) ||
1975 rcv_has_same_gid(mad_agent_priv, wr, wc)))
1976 /* Verify request has not been canceled */
1977 return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
1982 void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
1984 mad_send_wr->timeout = 0;
1985 if (mad_send_wr->refcount == 1)
1986 list_move_tail(&mad_send_wr->agent_list,
1987 &mad_send_wr->mad_agent_priv->done_list);
1990 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1991 struct ib_mad_recv_wc *mad_recv_wc)
1993 struct ib_mad_send_wr_private *mad_send_wr;
1994 struct ib_mad_send_wc mad_send_wc;
1995 unsigned long flags;
1998 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1999 ret = ib_mad_enforce_security(mad_agent_priv,
2000 mad_recv_wc->wc->pkey_index);
2002 ib_free_recv_mad(mad_recv_wc);
2003 deref_mad_agent(mad_agent_priv);
2007 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
2008 if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
2009 mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
2012 deref_mad_agent(mad_agent_priv);
2017 /* Complete corresponding request */
2018 if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
2019 spin_lock_irqsave(&mad_agent_priv->lock, flags);
2020 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
2022 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2023 if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
2024 && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
2025 && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
2026 & IB_MGMT_RMPP_FLAG_ACTIVE)) {
2027 /* user rmpp is in effect
2028 * and this is an active RMPP MAD
2030 mad_agent_priv->agent.recv_handler(
2031 &mad_agent_priv->agent, NULL,
2033 atomic_dec(&mad_agent_priv->refcount);
2035 /* not user rmpp, revert to normal behavior and
2036 * drop the mad */
2037 ib_free_recv_mad(mad_recv_wc);
2038 deref_mad_agent(mad_agent_priv);
2042 ib_mark_mad_done(mad_send_wr);
2043 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
2045 /* Defined behavior is to complete response before request */
2046 mad_agent_priv->agent.recv_handler(
2047 &mad_agent_priv->agent,
2048 &mad_send_wr->send_buf,
2050 atomic_dec(&mad_agent_priv->refcount);
2052 mad_send_wc.status = IB_WC_SUCCESS;
2053 mad_send_wc.vendor_err = 0;
2054 mad_send_wc.send_buf = &mad_send_wr->send_buf;
2055 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
2058 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
2060 deref_mad_agent(mad_agent_priv);
2066 static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
2067 const struct ib_mad_qp_info *qp_info,
2068 const struct ib_wc *wc,
2070 struct ib_mad_private *recv,
2071 struct ib_mad_private *response)
2073 enum smi_forward_action retsmi;
2074 struct ib_smp *smp = (struct ib_smp *)recv->mad;
2076 if (smi_handle_dr_smp_recv(smp,
2077 rdma_cap_ib_switch(port_priv->device),
2079 port_priv->device->phys_port_cnt) ==
2081 return IB_SMI_DISCARD;
2083 retsmi = smi_check_forward_dr_smp(smp);
2084 if (retsmi == IB_SMI_LOCAL)
2085 return IB_SMI_HANDLE;
2087 if (retsmi == IB_SMI_SEND) { /* don't forward */
2088 if (smi_handle_dr_smp_send(smp,
2089 rdma_cap_ib_switch(port_priv->device),
2090 port_num) == IB_SMI_DISCARD)
2091 return IB_SMI_DISCARD;
2093 if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
2094 return IB_SMI_DISCARD;
2095 } else if (rdma_cap_ib_switch(port_priv->device)) {
2096 /* forward case for switches */
2097 memcpy(response, recv, mad_priv_size(response));
2098 response->header.recv_wc.wc = &response->header.wc;
2099 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2100 response->header.recv_wc.recv_buf.grh = &response->grh;
2102 agent_send_response((const struct ib_mad_hdr *)response->mad,
2105 smi_get_fwd_port(smp),
2106 qp_info->qp->qp_num,
2110 return IB_SMI_DISCARD;
2112 return IB_SMI_HANDLE;
2115 static bool generate_unmatched_resp(const struct ib_mad_private *recv,
2116 struct ib_mad_private *response,
2117 size_t *resp_len, bool opa)
2119 const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
2120 struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;
2122 if (recv_hdr->method == IB_MGMT_METHOD_GET ||
2123 recv_hdr->method == IB_MGMT_METHOD_SET) {
2124 memcpy(response, recv, mad_priv_size(response));
2125 response->header.recv_wc.wc = &response->header.wc;
2126 response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
2127 response->header.recv_wc.recv_buf.grh = &response->grh;
2128 resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
2129 resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
2130 if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2131 resp_hdr->status |= IB_SMP_DIRECTION;
2133 if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
2134 if (recv_hdr->mgmt_class ==
2135 IB_MGMT_CLASS_SUBN_LID_ROUTED ||
2136 recv_hdr->mgmt_class ==
2137 IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
2138 *resp_len = opa_get_smp_header_size(
2139 (struct opa_smp *)recv->mad);
2141 *resp_len = sizeof(struct ib_mad_hdr);
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}
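/*
 * Receive completion handler: unmap the buffer, validate the MAD, let the
 * SMI code and then the driver's process_mad() see it, and finally hand it
 * to the matching agent (or generate an unmatched response). A receive
 * buffer is reposted on the way out.
 */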
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
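/*
 * Re-arm (or cancel) the agent's delayed work so it fires at the earliest
 * timeout remaining on the wait list.
 */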
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}
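/*
 * Insert the send WR into the wait list, kept sorted by absolute timeout,
 * and reschedule the timeout work if this entry becomes the earliest.
 */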
static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  unsigned long timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
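/*
 * Send completion handler: unmap the request, promote a queued overflow
 * send onto the send queue, and report the completion to the agent.
 */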
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}
static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}
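/*
 * Returns true if the completion should still be reported to the client,
 * false if the flushed send was successfully reposted.
 */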
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
		struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   NULL);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}
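/*
 * Flush every outstanding send for an agent (typically when it is being
 * unregistered) and report each one back with IB_WC_WR_FLUSH_ERR.
 */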
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}
static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}
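/*
 * Update the timeout of an outstanding send, or cancel it when timeout_ms
 * is 0 (see ib_cancel_mad() below). A rough usage sketch from a client
 * ("agent" and "msg" are placeholder names for the caller's ib_mad_agent
 * and ib_mad_send_buf, not anything defined in this file):
 *
 *	if (ib_modify_mad(agent, msg, 5000))
 *		pr_debug("send already completed or not found\n");
 */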
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);
void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);
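/*
 * Deliver completions for MADs that were handled locally (looped back to an
 * agent on the same port) without ever being posted to the QP.
 */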
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;
			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
						(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
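/*
 * Attempt to resend a timed-out request; returns 0 if the retry was
 * (re)queued and non-zero when no retries remain or the send failed.
 */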
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}
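/*
 * Delayed-work handler: retire or retry every request on the wait list
 * whose timeout has expired, reporting IB_WC_RESP_TIMEOUT_ERR upward.
 */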
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}
/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			kfree(mad_priv);
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}
/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}
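/*
 * Start the port: transition the special QPs through INIT/RTR/RTS, request
 * CQ notifications, and post the initial receive MADs.
 */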
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}
static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}
static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}
static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}
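/*
 * Create one special QP (SMI or GSI) sized by the module parameters and
 * sharing the port's single CQ for both send and receive completions.
 */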
static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}
/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv)
		return -ENOMEM;

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error3;
	}

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error4:
	ib_dealloc_pd(port_priv->pd);
error3:
	kfree(port_priv);

	return ret;
}
/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_free_cq(port_priv->cq);
	ib_dealloc_pd(port_priv->pd);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}
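/*
 * Client callback: bring up MAD services on every port of a new device
 * that supports them, unwinding already-opened ports on failure.
 */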
static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}
static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};
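/*
 * Module init: clamp the queue-size module parameters to the supported
 * range, reserve agent ID 0, and register as an IB client.
 */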
int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	/* Client ID 0 is used for snoop-only clients */
	idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}
void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}