 * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

static struct list_head ib_mad_port_list;
static u32 ib_mad_client_id = 0;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);
/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
/*
 * Returns a ib_mad_port_private structure or NULL for a device/port
 * Assumes ib_mad_port_list_lock is being held
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}
/*
 * Wrapper function to return a ib_mad_port_private structure or NULL
 * for a device/port
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}
static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type)
	{
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}
static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}
static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}
int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);
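
/*
 * For example, GetResp (0x81) and ReportResp (0x86) carry the
 * IB_MGMT_METHOD_RESP bit (0x80) and test true here, while Get (0x01)
 * and Set (0x02) do not; TrapRepress and BM responses flagged via
 * attr_mod are the two exceptions handled explicitly above.
 */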
/*
 * ib_register_mad_agent - Register to send/receive MADs
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	unsigned long flags;
	u8 mgmt_class, vclass;

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid QP Type %d\n",
			   qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: invalid RMPP Version %u\n",
			   rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: invalid Class Version %u\n",
				   mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_notice(&device->dev,
				   "ib_register_mad_agent: no recv_handler\n");
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_notice(&device->dev,
				   "ib_register_mad_agent: Invalid Mgmt Class 0\n");
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: No OUI specified for class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_notice(&device->dev,
					   "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
					   mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_notice(&device->dev, "ib_register_mad_agent: Invalid port\n");
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported.  For example, Ethernet devices
	 * will not have QP0 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_notice(&device->dev,
			   "ib_register_mad_agent: QP %d not supported\n", qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	atomic_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;

	/*
	 * Make sure MAD registration (if supplied)
	 * is non overlapping with any existing ones
	 */
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error4;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error4;
		}
	}

	/* Add mad agent into port's agent list */
	list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return &mad_agent_priv->agent;

error4:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);
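
/*
 * Usage sketch (hypothetical client, not part of this file): an SA
 * client could register on the GSI QP roughly as follows, with
 * my_send_handler/my_recv_handler/my_context standing in for the
 * client's own callbacks and state:
 *
 *	struct ib_mad_reg_req req = {
 *		.mgmt_class = IB_MGMT_CLASS_SUBN_ADM,
 *		.mgmt_class_version = 2,
 *	};
 *	struct ib_mad_agent *agent;
 *
 *	set_bit(IB_MGMT_METHOD_GET, req.method_mask);
 *	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
 *				      &req, 0, my_send_handler,
 *				      my_recv_handler, my_context, 0);
 *	if (IS_ERR(agent))
 *		return PTR_ERR(agent);
 */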
static inline int is_snooping_sends(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(/*IB_MAD_SNOOP_POSTED_SENDS |
		   IB_MAD_SNOOP_RMPP_SENDS |*/
		 IB_MAD_SNOOP_SEND_COMPLETIONS /*|
		 IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS*/));
}

static inline int is_snooping_recvs(int mad_snoop_flags)
{
	return (mad_snoop_flags &
		(IB_MAD_SNOOP_RECVS /*|
		 IB_MAD_SNOOP_RMPP_RECVS*/));
}
static int register_snoop_agent(struct ib_mad_qp_info *qp_info,
				struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_snoop_private **new_snoop_table;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	/* Check for empty slot in array. */
	for (i = 0; i < qp_info->snoop_table_size; i++)
		if (!qp_info->snoop_table[i])
			break;

	if (i == qp_info->snoop_table_size) {
		/* Grow table. */
		new_snoop_table = krealloc(qp_info->snoop_table,
					   sizeof mad_snoop_priv *
					   (qp_info->snoop_table_size + 1),
					   GFP_ATOMIC);
		if (!new_snoop_table) {
			i = -ENOMEM;
			goto out;
		}

		qp_info->snoop_table = new_snoop_table;
		qp_info->snoop_table_size++;
	}
	qp_info->snoop_table[i] = mad_snoop_priv;
	atomic_inc(&qp_info->snoop_count);
out:
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
	return i;
}
struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   int mad_snoop_flags,
					   ib_mad_snoop_handler snoop_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret;
	struct ib_mad_snoop_private *mad_snoop_priv;
	int qpn;

	/* Validate parameters */
	if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) ||
	    (is_snooping_recvs(mad_snoop_flags) && !recv_handler)) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		ret = ERR_PTR(-EINVAL);
		goto error1;
	}
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}
	/* Allocate structures */
	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
	if (!mad_snoop_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	/* Now, fill in the various structures */
	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
	mad_snoop_priv->agent.device = device;
	mad_snoop_priv->agent.recv_handler = recv_handler;
	mad_snoop_priv->agent.snoop_handler = snoop_handler;
	mad_snoop_priv->agent.context = context;
	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_snoop_priv->agent.port_num = port_num;
	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
	init_completion(&mad_snoop_priv->comp);
	mad_snoop_priv->snoop_index = register_snoop_agent(
						&port_priv->qp_info[qpn],
						mad_snoop_priv);
	if (mad_snoop_priv->snoop_index < 0) {
		ret = ERR_PTR(mad_snoop_priv->snoop_index);
		goto error2;
	}

	atomic_set(&mad_snoop_priv->refcount, 1);
	return &mad_snoop_priv->agent;

error2:
	kfree(mad_snoop_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_snoop);
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (atomic_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
{
	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
		complete(&mad_snoop_priv->comp);
}
static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	/* Note that we could still be handling received MADs */

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	remove_mad_reg_req(mad_agent_priv);
	list_del(&mad_agent_priv->agent_list);
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	flush_workqueue(port_priv->wq);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);

	kfree(mad_agent_priv->reg_req);
	kfree(mad_agent_priv);
}
static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
{
	struct ib_mad_qp_info *qp_info;
	unsigned long flags;

	qp_info = mad_snoop_priv->qp_info;
	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	qp_info->snoop_table[mad_snoop_priv->snoop_index] = NULL;
	atomic_dec(&qp_info->snoop_count);
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

	deref_snoop_agent(mad_snoop_priv);
	wait_for_completion(&mad_snoop_priv->comp);

	kfree(mad_snoop_priv);
}
/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 */
int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_snoop_private *mad_snoop_priv;

	/* If the TID is zero, the agent can only snoop. */
	if (mad_agent->hi_tid) {
		mad_agent_priv = container_of(mad_agent,
					      struct ib_mad_agent_private,
					      agent);
		unregister_mad_agent(mad_agent_priv);
	} else {
		mad_snoop_priv = container_of(mad_agent,
					      struct ib_mad_snoop_private,
					      agent);
		unregister_mad_snoop(mad_snoop_priv);
	}
	return 0;
}
EXPORT_SYMBOL(ib_unregister_mad_agent);
static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	BUG_ON(!mad_list->mad_queue);
	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}
static void snoop_send(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_send_buf *send_buf,
		       struct ib_mad_send_wc *mad_send_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
						    send_buf, mad_send_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void snoop_recv(struct ib_mad_qp_info *qp_info,
		       struct ib_mad_recv_wc *mad_recv_wc,
		       int mad_snoop_flags)
{
	struct ib_mad_snoop_private *mad_snoop_priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&qp_info->snoop_lock, flags);
	for (i = 0; i < qp_info->snoop_table_size; i++) {
		mad_snoop_priv = qp_info->snoop_table[i];
		if (!mad_snoop_priv ||
		    !(mad_snoop_priv->mad_snoop_flags & mad_snoop_flags))
			continue;

		atomic_inc(&mad_snoop_priv->refcount);
		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, NULL,
						   mad_recv_wc);
		deref_snoop_agent(mad_snoop_priv);
		spin_lock_irqsave(&qp_info->snoop_lock, flags);
	}
	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
}
static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}
static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}
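
/*
 * Layout note: struct ib_mad_private places the GRH immediately before
 * the variable-size MAD buffer, so mad_priv_dma_size() is what gets
 * DMA-mapped for receives, while mad_priv_size() additionally counts
 * the private header in front of them.
 */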
/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) {
		u32 opa_drslid;

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for ib_mad_local_private\n");
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		dev_err(&device->dev, "No memory for local response MAD\n");
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
				  (const struct ib_mad_hdr *)smp, mad_size,
				  (struct ib_mad_hdr *)mad_priv->mad,
				  &mad_size, &out_mad_pkey_index);
	switch (ret)
	{
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			atomic_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
						(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	atomic_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return mad_size - (hdr_len + data_len);
}
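
/*
 * Worked example (illustrative): with mad_size 256 and hdr_len 56,
 * seg_size is 200; data_len 120 yields pad 80, while data_len 200
 * divides evenly and yields pad 0.
 */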
static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			dev_err(&send_buf->mad_agent->device->dev,
				"alloc_send_rmpp_segs: RMPP mem alloc failed for len %zd, gfp %#x\n",
				sizeof (*seg) + seg_size, gfp_mask);
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}
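
/*
 * Worked example (illustrative): an RMPP send with mad_size 256,
 * hdr_len 56 and data_len 1000 gives seg_size 200 and pad 0, so the
 * loop above links five 200-byte segments, numbered 1..5, onto
 * rmpp_list before the RMPP header is marked ACTIVE.
 */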
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);
struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
					    u32 remote_qpn, u16 pkey_index,
					    int rmpp_active,
					    int hdr_len, int data_len,
					    gfp_t gfp_mask,
					    u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	atomic_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);
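
/*
 * Allocation note: buf above is a single kzalloc'd region holding the
 * MAD (or just its header for RMPP sends) with the private
 * ib_mad_send_wr_private placed at offset 'size', which is why
 * ib_free_send_mad() only needs to free send_buf->mad.
 */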
int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);
int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);
static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}
void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);
int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   &bad_send_wr);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
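
/*
 * Flow-control note: when the send queue is already at max_active, the
 * work request is parked on qp_info->overflow_list rather than posted;
 * the send-completion path (outside this excerpt) posts queued entries
 * as queue slots free up.
 */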
/*
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 *  with the registered client
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_buf *next_send_buf;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int ret = -EINVAL;

	/* Walk list of send WRs and post each on send list */
	for (; send_buf; send_buf = next_send_buf) {

		mad_send_wr = container_of(send_buf,
					   struct ib_mad_send_wr_private,
					   send_buf);
		mad_agent_priv = mad_send_wr->mad_agent_priv;

		if (!send_buf->mad_agent->send_handler ||
		    (send_buf->timeout_ms &&
		     !send_buf->mad_agent->recv_handler)) {
			ret = -EINVAL;
			goto error;
		}

		if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
			if (mad_agent_priv->agent.rmpp_version) {
				ret = -EINVAL;
				goto error;
			}
		}

		/*
		 * Save pointer to next work request to post in case the
		 * current one completes, and the user modifies the work
		 * request associated with the completion
		 */
		next_send_buf = send_buf->next;
		mad_send_wr->send_wr.ah = send_buf->ah;

		if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
		    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ret = handle_outgoing_dr_smp(mad_agent_priv,
						     mad_send_wr);
			if (ret < 0)		/* error */
				goto error;
			else if (ret == 1)	/* locally consumed */
				continue;
		}

		mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
		/* Timeout will be updated after send completes */
		mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
		mad_send_wr->max_retries = send_buf->retries;
		mad_send_wr->retries_left = send_buf->retries;
		send_buf->retries = 0;
		/* Reference for work request to QP + response */
		mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
		mad_send_wr->status = IB_WC_SUCCESS;

		/* Reference MAD agent until send completes */
		atomic_inc(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_agent_priv->send_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
			ret = ib_send_rmpp_mad(mad_send_wr);
			if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
				ret = ib_send_mad(mad_send_wr);
		} else
			ret = ib_send_mad(mad_send_wr);
		if (ret < 0) {
			/* Fail send request */
			spin_lock_irqsave(&mad_agent_priv->lock, flags);
			list_del(&mad_send_wr->agent_list);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			atomic_dec(&mad_agent_priv->refcount);
			goto error;
		}
	}
	return 0;
error:
	if (bad_send_buf)
		*bad_send_buf = send_buf;
	return ret;
}
EXPORT_SYMBOL(ib_post_send_mad);
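
/*
 * Usage sketch (hypothetical client, not part of this file): a caller
 * with a registered agent typically builds and posts a request like
 * so, assuming 'agent' and 'ah' were set up elsewhere:
 *
 *	struct ib_mad_send_buf *msg;
 *
 *	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, 0,
 *				 IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
 *				 GFP_KERNEL, IB_MGMT_BASE_VERSION);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *	msg->ah = ah;
 *	msg->timeout_ms = 100;
 *	msg->retries = 3;
 *	// ... fill in msg->mad ...
 *	if (ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 *
 * On completion, the agent's send_handler is responsible for calling
 * ib_free_send_mad().
 */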
/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
					&free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);
struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
					u8 rmpp_version,
					ib_mad_send_handler send_handler,
					ib_mad_recv_handler recv_handler,
					void *context)
{
	return ERR_PTR(-EINVAL);	/* XXX: for now */
}
EXPORT_SYMBOL(ib_redirect_mad_qp);

int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
		      struct ib_wc *wc)
{
	dev_err(&mad_agent->device->dev,
		"ib_process_mad_wc() not implemented yet\n");
	return 0;
}
EXPORT_SYMBOL(ib_process_mad_wc);
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}
static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	if (!*method) {
		pr_err("No memory for ib_mad_mgmt_method_table\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}
static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++) {
		if (method->agent[i] == agent) {
			method->agent[i] = NULL;
		}
	}
}
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_class_table\n");
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		if ((ret = allocate_method_table(method)))
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			if ((ret = allocate_method_table(method)))
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class_table\n");
			goto error1;
		}

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class) {
			dev_err(&agent_priv->agent.device->dev,
				"No memory for ib_mad_mgmt_vendor_class\n");
			goto error2;
		}

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there matching OUI for this vendor class ? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			BUG_ON(!*method);
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* OUI slot available ? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if (!*method) {
				ret = allocate_method_table(method);
				if (ret)
					goto error3;
			}
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was MAD registration request supplied
	 * with original registration ?
	 */
	if (!agent_priv->reg_req) {
		goto out;
	}

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left ? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left ? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left ? */
					if (!check_vendor_table(vendor)) {
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
						kfree(vendor);
					}
				}
			}
		}
	}

out:
	return;
}
static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	spin_lock_irqsave(&port_priv->reg_lock, flags);
	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;
		struct ib_mad_agent_private *entry;

		/*
		 * Routing is based on high 32 bits of transaction ID
		 * of MAD received from response MAD
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
			if (entry->agent.hi_tid == hi_tid) {
				mad_agent = entry;
				break;
			}
		}
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		/*
		 * Routing is based on version, class, and method
		 * For "newer" vendor MADs, also based on OUI
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		}
	}

	if (mad_agent) {
		if (mad_agent->agent.recv_handler)
			atomic_inc(&mad_agent->refcount);
		else {
			dev_notice(&port_priv->device->dev,
				   "No receive handler for client %p on port %d\n",
				   &mad_agent->agent, port_priv->port_num);
			mad_agent = NULL;
		}
	}
out:
	spin_unlock_irqrestore(&port_priv->reg_lock, flags);

	return mad_agent;
}
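
/*
 * TID routing note: agent.hi_tid comes from ++ib_mad_client_id at
 * registration and forms the upper 32 bits of every TID the client
 * sends, so a response's TID alone identifies the owning agent here;
 * the client-chosen lower 32 bits never enter the lookup.
 */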
static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}
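
/*
 * The next helper is true when a MAD should be handed to the agent as
 * ordinary data: the agent does no kernel RMPP at all, the packet is
 * not an active RMPP segment, or it is an RMPP DATA segment (as
 * opposed to an ACK/STOP/ABORT control packet, which mad_rmpp.c
 * consumes).
 */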
static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}
static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct ib_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses. GIDs different */
		return 0;

	if (ib_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	if (!!(attr.ah_flags & IB_AH_GRH) !=
	    !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has GID, other does not.  Assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* is request/response. */
		if (!(attr.ah_flags & IB_AH_GRH)) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((attr.src_path_bits ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			if (ib_get_cached_gid(device, port_num,
					      attr.grh.sgid_index, &sgid, NULL))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!(attr.ah_flags & IB_AH_GRH))
		return attr.dlid == rwc->wc->slid;
	else
		return !memcmp(attr.grh.dgid.raw, rwc->recv_buf.grh->sgid.raw,
			       16);
}
static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}
static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			   && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			   && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
					& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/* user rmpp is in effect
				 * and this is an active RMPP MAD
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				atomic_dec(&mad_agent_priv->refcount);
			} else {
				/* not user rmpp, revert to normal behavior and
				 * drop the mad */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			atomic_dec(&mad_agent_priv->refcount);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}
static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}
static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	if (opa_smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}
static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SMI_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

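/*
 * Editor's note: rdma_cap_opa_mad() is a per-port capability while the
 * base/class version pair is per-MAD, so an OPA-capable port can still
 * receive plain IB SMPs; anything that is not an OPA SMP falls through
 * to handle_ib_smi() above.
 */
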
static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will cleanup
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	if (atomic_read(&qp_info->snoop_count))
		snoop_recv(qp_info, &recv->header.recv_wc, IB_MAD_SNOOP_RECVS);

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response) {
		dev_err(&port_priv->device->dev,
			"%s: no memory for response buffer\n", __func__);
		goto out;
	}

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give driver "right of first refusal" on incoming MAD */
	if (port_priv->device->process_mad) {
		ret = port_priv->device->process_mad(port_priv->device, 0,
						     port_priv->port_num,
						     wc, &recv->grh,
						     (const struct ib_mad_hdr *)recv->mad,
						     recv->mad_size,
						     (struct ib_mad_hdr *)response->mad,
						     &mad_size, &resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}

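/*
 * Usage sketch (editor's addition; my_recv_handler and consume_mad are
 * hypothetical client code, not kernel APIs): a consumer registered with
 * ib_register_mad_agent() sees the recv_wc assembled above in its
 * recv_handler and must release it with ib_free_recv_mad():
 *
 *	static void my_recv_handler(struct ib_mad_agent *agent,
 *				    struct ib_mad_send_buf *send_buf,
 *				    struct ib_mad_recv_wc *recv_wc)
 *	{
 *		consume_mad(recv_wc->recv_buf.mad);
 *		ib_free_recv_mad(recv_wc);
 *	}
 */
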
static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else
		list_item = &mad_agent_priv->wait_list;
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}

void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  int timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

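/*
 * Editor's note on the timeout bookkeeping: mad_send_wr->timeout holds a
 * relative tick count at this point; wait_for_response() turns it into
 * an absolute expiry by adding the current jiffies. For example, with
 * timeout_ms = 100 and HZ = 250, msecs_to_jiffies(100) is 25 ticks, so
 * the request expires at jiffies + 25.
 */
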
/*
 * Process a send work completion
 */
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_send_wr *bad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					struct ib_mad_send_wr_private,
					mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	if (atomic_read(&qp_info->snoop_count))
		snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
			   IB_MAD_SNOOP_SEND_COMPLETIONS);
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   &bad_send_wr);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
		struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			struct ib_send_wr *bad_send_wr;

			mad_send_wr->retry = 0;
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   &bad_send_wr);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}

static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

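/*
 * Usage sketch (editor's addition, hypothetical caller): a client may
 * rearm or abort a posted send; a timeout of 0 cancels it:
 *
 *	ib_modify_mad(agent, send_buf, 5000);	// wait 5s more for a reply
 *	ib_cancel_mad(agent, send_buf);		// abort; completes with
 *						// IB_WC_WR_FLUSH_ERR
 *
 * "agent" and "send_buf" stand for values obtained earlier from
 * ib_register_mad_agent() and ib_create_send_mad().
 */
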
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
				(struct ib_mad *)local->mad_priv->mad;
			if (atomic_read(&recv_mad_agent->qp_info->snoop_count))
				snoop_recv(recv_mad_agent->qp_info,
					   &local->mad_priv->header.recv_wc,
					   IB_MAD_SNOOP_RECVS);
			recv_mad_agent->agent.recv_handler(
						&recv_mad_agent->agent,
						&local->mad_send_wr->send_buf,
						&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			atomic_dec(&recv_mad_agent->refcount);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
			snoop_send(mad_agent_priv->qp_info,
				   &local->mad_send_wr->send_buf,
				   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		atomic_dec(&mad_agent_priv->refcount);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

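/*
 * Illustrative sketch (editor's addition): retries_left is seeded from
 * the retries value the client set on the send buffer before posting,
 * so the policy consumed by retry_send() is configured roughly like
 * this (hypothetical caller, send_buf from ib_create_send_mad()):
 *
 *	send_buf->timeout_ms = 100;	// per-attempt response timeout
 *	send_buf->retries = 3;		// extra attempts before giving up
 *	ret = ib_post_send_mad(send_buf, NULL);
 *
 * Once the final retry expires, timeout_sends() below reports
 * IB_WC_RESP_TIMEOUT_ERR to the agent's send_handler.
 */
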
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		atomic_dec(&mad_agent_priv->refcount);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				dev_err(&qp_info->port_priv->device->dev,
					"No memory for receive buffer\n");
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, &bad_recv_wr);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {

		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr) {
		dev_err(&port_priv->device->dev,
			"Couldn't kmalloc ib_qp_attr\n");
		return -ENOMEM;
	}

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
					     IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
	spin_lock_init(&qp_info->snoop_lock);
	qp_info->snoop_table = NULL;
	qp_info->snoop_table_size = 0;
	atomic_set(&qp_info->snoop_count, 0);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;
	int ret;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		ret = PTR_ERR(qp_info->qp);
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
	return ret;
}

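/*
 * Editor's note: both MAD QPs share the one per-port CQ allocated in
 * ib_mad_port_open() below, which is why cq_size there is mad_sendq_size
 * + mad_recvq_size, doubled when the port also runs an SMI QP. Assuming
 * the usual defaults of 128 send / 512 receive work requests, that works
 * out to 640 CQ entries, or 1280 with SMI.
 */
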
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
	kfree(qp_info->snoop_table);
}

/*
 * Open the port
 * Create the QP, PD, MR, and CQ if needed
 */
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		dev_err(&device->dev, "No memory for ib_mad_port_private\n");
		return -ENOMEM;
	}

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	INIT_LIST_HEAD(&port_priv->agent_list);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error3;
	}

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
			IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error4:
	ib_dealloc_pd(port_priv->pd);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (CQ, MR, PD, QP) and remove the port's info structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_free_cq(port_priv->cq);
	ib_dealloc_pd(port_priv->pd);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

static void ib_mad_init_device(struct ib_device *device)
{
	int start, i;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_mad_port_open(device, i)) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		if (ib_agent_port_open(device, i)) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
	}
	return;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

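/*
 * Usage sketch (editor's addition): the min/max clamping above bounds
 * the module parameters declared at the top of this file, so a load
 * such as (assuming this file is built into the ib_core module):
 *
 *	modprobe ib_core send_queue_size=64 recv_queue_size=100000
 *
 * leaves send_queue_size at 64 but limits recv_queue_size to
 * IB_MAD_QP_MAX_SIZE rather than the requested value.
 */
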
void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}