/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2008 Cisco. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) "user_mad: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/nospec.h>

#include <asm/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");
enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};
/*
 * Our lifetime rules for these structs are the following:
 * each time a device special file is opened, we take a reference on
 * the ib_umad_port's struct ib_umad_device.  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we drop the module's reference.
 */
struct ib_umad_port {
	struct cdev           cdev;
	struct device	      *dev;

	struct cdev           sm_cdev;
	struct device	      *sm_dev;
	struct semaphore       sm_sem;

	struct mutex	       file_mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};
struct ib_umad_device {
	struct kobject       kobj;
	struct ib_umad_port  port[0];
};
struct ib_umad_file {
	struct mutex		mutex;
	struct ib_umad_port    *port;
	struct list_head	recv_list;
	struct list_head	send_list;
	struct list_head	port_list;
	spinlock_t		send_lock;
	wait_queue_head_t	recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int			agents_dead;
	u8			use_pkey_index;
	u8			already_used;
};
struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head	list;
	int			length;
	struct ib_user_mad	mad;
};
static struct class *umad_class;

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);

static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
static void ib_umad_release_dev(struct kobject *kobj)
{
	struct ib_umad_device *dev =
		container_of(kobj, struct ib_umad_device, kobj);

	kfree(dev);
}

static struct kobj_type ib_umad_dev_ktype = {
	.release = ib_umad_release_dev,
};
static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}

/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}
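
/*
 * The old ABI header (ib_user_mad_hdr_old) lacks the pkey_index field;
 * the new one adds it.  use_pkey_index is set either explicitly via the
 * IB_USER_MAD_ENABLE_PKEY ioctl or implicitly by REGISTER_AGENT2, and
 * already_used locks the choice in once an agent is registered, so all
 * read()/write() traffic on a given fd uses one consistent header size.
 */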
static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}
static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}
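
/*
 * Note that a timed-out send is reported back through the receive path:
 * the packet is truncated to the MAD header, hdr.status is set to
 * ETIMEDOUT, and it is queued on recv_list with recv_wc left NULL, so
 * ib_umad_read() hands it to userspace via copy_send_mad().
 */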
static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status	    = 0;
	packet->mad.hdr.length	    = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn	    = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid	    = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl	    = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index  = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;

		ib_init_ah_from_wc(agent->device, agent->port_num,
				   mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
				   &ah_attr);

		packet->mad.hdr.gid_index     = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit     = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;
	size_t seg_size;

	recv_buf = &packet->recv_wc->recv_buf;
	seg_size = packet->recv_wc->mad_seg_size;

	/* We need enough room to copy the first (or only) MAD segment. */
	if ((packet->length <= seg_size &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > seg_size &&
	     count < hdr_size(file) + seg_size))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, seg_size);
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = seg_size - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}
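
/*
 * Illustrative arithmetic (example values, not fixed by the code): with
 * a 256-byte MAD segment and an SA-class message, offset from
 * ib_get_mad_data_offset() points past the class header, so each middle
 * segment contributes max_seg_payload = seg_size - offset bytes of
 * payload; only the final segment may carry less, which is why
 * seg_payload is recomputed as min(left, max_seg_payload) per iteration.
 */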
static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	return size;
}
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	if (file->agents_dead) {
		mutex_unlock(&file->mutex);
		return -EIO;
	}

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}
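
/*
 * A minimal userspace read sketch against this entry point (assumes an
 * fd on a umad device using the pkey-aware ABI; illustrative only):
 *
 *	struct ib_user_mad *um = malloc(sizeof *um + 256);
 *	ssize_t n = read(fd, um, sizeof *um + 256);
 *	if (n < 0 && errno == ENOSPC)
 *		;	// RMPP reply larger than the buffer: enlarge and retry
 *
 * Retrying is safe because a packet that fails to copy out is requeued
 * at the head of recv_list rather than dropped.
 */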
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}
static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}
static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad(hdr)) {
			if (!ib_response_mad(sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad(sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;
	u8 base_version;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index	  = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label	  = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit	  = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);

	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && ib_mad_kernel_rmpp_agent(agent)) {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
						IB_MGMT_RMPP_FLAG_ACTIVE;
	} else {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	}

	base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL,
					 base_version);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah		= ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries	= packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user(packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	if (!ib_mad_kernel_rmpp_agent(agent)
	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
		spin_lock_irq(&file->send_lock);
		list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
	} else {
		spin_lock_irq(&file->send_lock);
		ret = is_duplicate(file, packet);
		if (!ret)
			list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
		if (ret) {
			ret = -EINVAL;
			goto err_msg;
		}
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}
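
/*
 * Matching userspace send sketch (illustrative; the field values are
 * examples, not defaults):
 *
 *	struct ib_user_mad *um = calloc(1, sizeof *um + 256);
 *	um->hdr.id	   = agent_id;		// from a REGISTER_AGENT ioctl
 *	um->hdr.qpn	   = htobe32(1);	// GSI
 *	um->hdr.lid	   = htobe16(dlid);
 *	um->hdr.timeout_ms = 100;		// nonzero: kernel tracks the response
 *	um->hdr.retries	   = 3;
 *	// build the MAD in um->data, then:
 *	write(fd, um, sizeof *um + 256);
 */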
static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(file->port->dev,
		   "ib_umad_reg_agent: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file, 0);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			dev_warn(file->port->dev,
				"process %s did not enable P_Key index support.\n",
				current->comm);
			dev_warn(file->port->dev,
				"   Documentation/infiniband/user_mad.txt has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
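
/*
 * Registration from userspace is a single ioctl (sketch; the management
 * class shown is an example):
 *
 *	struct ib_user_mad_reg_req req = {
 *		.qpn		    = 1,	// GSI
 *		.mgmt_class	    = 0x03,	// e.g. subnet administration
 *		.mgmt_class_version = 2,
 *	};
 *	if (!ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req))
 *		agent_id = req.id;		// filled in by the kernel
 */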
static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
	struct ib_user_mad_reg_req2 ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof(ureq))) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
			   ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
		ret = -EINVAL;

		if (put_user((u32)IB_USER_MAD_REG_FLAGS_CAP,
				(u32 __user *) (arg + offsetof(struct
				ib_user_mad_reg_req2, flags))))
			ret = -EFAULT;

		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(file->port->dev,
		   "ib_umad_reg_agent2: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		if (ureq.oui & 0xff000000) {
			dev_notice(file->port->dev,
				   "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
				   ureq.oui);
			ret = -EINVAL;
			goto out;
		}
		req.oui[2] =  ureq.oui & 0x0000ff;
		req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
		req.oui[0] = (ureq.oui & 0xff0000) >> 16;
		memcpy(req.method_mask, ureq.method_mask,
		       sizeof(req.method_mask));
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file,
				      ureq.flags);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *)(arg +
				offsetof(struct ib_user_mad_reg_req2, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		file->use_pkey_index = 1;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;
	if (id >= IB_UMAD_MAX_AGENTS)
		return -EINVAL;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
	if (!__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
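
/*
 * array_index_nospec() above clamps the user-supplied id under
 * speculative execution, so the file->agent[] load cannot be turned
 * into a Spectre-v1 out-of-bounds gadget; the preceding bounds check
 * alone does not constrain speculation.
 */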
static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}
static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = -ENXIO;

	port = container_of(inode->i_cdev, struct ib_umad_port, cdev);

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev)
		goto out;

	ret = -ENOMEM;
	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		goto out;

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

	ret = nonseekable_open(inode, filp);
	if (ret) {
		list_del(&file->port_list);
		kfree(file);
		goto out;
	}

	kobject_get(&port->umad_dev->kobj);

out:
	mutex_unlock(&port->file_mutex);
	return ret;
}
static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	kobject_put(&dev->kobj);

	return 0;
}
static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close,
	.llseek		= no_llseek,
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret)
		goto err_up_sem;

	filp->private_data = port;

	ret = nonseekable_open(inode, filp);
	if (ret)
		goto err_clr_sm_cap;

	kobject_get(&port->umad_dev->kobj);

	return 0;

err_clr_sm_cap:
	swap(props.set_port_cap_mask, props.clr_port_cap_mask);
	ib_modify_port(port->ib_dev, port->port_num, 0, &props);

err_up_sem:
	up(&port->sm_sem);

fail:
	return ret;
}
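
/*
 * Opening issm%d asserts IB_PORT_SM in the port capability mask and
 * keeps it set until the fd is closed.  sm_sem makes the file
 * single-open, so at most one userspace SM owns a port at a time;
 * with O_NONBLOCK the wait for the semaphore becomes -EAGAIN.
 */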
static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	kobject_put(&port->umad_dev->kobj);

	return ret;
}
static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close,
	.llseek	 = no_llseek,
};

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};
static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_MAD_ABI_VERSION));
static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
static int find_overflow_devnum(struct ib_device *device)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
					  "infiniband_mad");
		if (ret) {
			dev_err(&device->dev,
				"couldn't register dynamic device number\n");
			return ret;
		}
	}

	ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
	if (ret >= IB_UMAD_MAX_PORTS)
		return -1;

	return ret;
}
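
/*
 * The statically registered region covers IB_UMAD_MAX_PORTS (64)
 * umad/issm minor pairs.  Ports beyond that fall back to a dynamic
 * major allocated lazily here and tracked in overflow_map, mirroring
 * the dev_map bookkeeping for the static range.
 */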
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_device *umad_dev,
			     struct ib_umad_port *port)
{
	int devnum;
	dev_t base;

	spin_lock(&port_lock);
	devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (devnum >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		devnum = find_overflow_devnum(device);
		if (devnum < 0)
			return -1;

		spin_lock(&port_lock);
		port->dev_num = devnum + IB_UMAD_MAX_PORTS;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		port->dev_num = devnum;
		base = devnum + base_dev;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	sema_init(&port->sm_sem, 1);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	cdev_init(&port->cdev, &umad_fops);
	port->cdev.owner = THIS_MODULE;
	port->cdev.kobj.parent = &umad_dev->kobj;
	kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
	if (cdev_add(&port->cdev, base, 1))
		goto err_cdev;

	port->dev = device_create(umad_class, device->dma_device,
				  port->cdev.dev, port,
				  "umad%d", port->dev_num);
	if (IS_ERR(port->dev))
		goto err_cdev;

	if (device_create_file(port->dev, &dev_attr_ibdev))
		goto err_dev;
	if (device_create_file(port->dev, &dev_attr_port))
		goto err_dev;

	base += IB_UMAD_MAX_PORTS;
	cdev_init(&port->sm_cdev, &umad_sm_fops);
	port->sm_cdev.owner	  = THIS_MODULE;
	port->sm_cdev.kobj.parent = &umad_dev->kobj;
	kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
	if (cdev_add(&port->sm_cdev, base, 1))
		goto err_sm_cdev;

	port->sm_dev = device_create(umad_class, device->dma_device,
				     port->sm_cdev.dev, port,
				     "issm%d", port->dev_num);
	if (IS_ERR(port->sm_dev))
		goto err_sm_cdev;

	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
		goto err_sm_dev;
	if (device_create_file(port->sm_dev, &dev_attr_port))
		goto err_sm_dev;

	return 0;

err_sm_dev:
	device_destroy(umad_class, port->sm_cdev.dev);

err_sm_cdev:
	cdev_del(&port->sm_cdev);

err_dev:
	device_destroy(umad_class, port->cdev.dev);

err_cdev:
	cdev_del(&port->cdev);
	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

	return -1;
}
static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	dev_set_drvdata(port->dev,    NULL);
	dev_set_drvdata(port->sm_dev, NULL);

	device_destroy(umad_class, port->cdev.dev);
	device_destroy(umad_class, port->sm_cdev.dev);

	cdev_del(&port->cdev);
	cdev_del(&port->sm_cdev);

	mutex_lock(&port->file_mutex);

	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(port->dev_num, dev_map);
	else
		clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}
static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);

	for (i = s; i <= e; ++i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, umad_dev,
				      &umad_dev->port[i - s]))
			goto err;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ib_umad_kill_port(&umad_dev->port[i - s]);
	}
free:
	kobject_put(&umad_dev->kobj);
}
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_umad_device *umad_dev = client_data;
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
		if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
			ib_umad_kill_port(&umad_dev->port[i]);
	}

	kobject_put(&umad_dev->kobj);
}

static char *umad_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
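
/*
 * With this devnode callback the class devices are created as
 * /dev/infiniband/umad%d and /dev/infiniband/issm%d instead of at the
 * top level of /dev.
 */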
static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		pr_err("couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		pr_err("couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	umad_class->devnode = umad_devnode;

	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		pr_err("couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}
static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);