/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include <linux/bitmap.h>

#include "user_sdma.h"
#include "user_exp_rcv.h"
#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *inode, struct file *fp);
static int hfi1_file_close(struct inode *inode, struct file *fp);
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from);
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);

static u64 kvirt_to_phys(void *addr);
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
                          const struct hfi1_user_info *uinfo);
static int init_user_ctxt(struct hfi1_filedata *fd,
                          struct hfi1_ctxtdata *uctxt);
static void user_init(struct hfi1_ctxtdata *uctxt);
static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
                         __u32 len);
static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
                         __u32 len);
static int setup_base_ctxt(struct hfi1_filedata *fd,
                           struct hfi1_ctxtdata *uctxt);
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);

static int find_sub_ctxt(struct hfi1_filedata *fd,
                         const struct hfi1_user_info *uinfo);
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
                         struct hfi1_user_info *uinfo,
                         struct hfi1_ctxtdata **cd);
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
                          unsigned long events);
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
                       int start_stop);
static int vma_fault(struct vm_fault *vmf);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
                            unsigned long arg);
static const struct file_operations hfi1_file_ops = {
        .owner = THIS_MODULE,
        .write_iter = hfi1_write_iter,
        .open = hfi1_file_open,
        .release = hfi1_file_close,
        .unlocked_ioctl = hfi1_file_ioctl,
        .poll = hfi1_poll,
        .mmap = hfi1_file_mmap,
        .llseek = noop_llseek,
};
static const struct vm_operations_struct vm_ops = {
        .fault = vma_fault,
};
/*
 * Types of memory mapped into user processes' address space
 */
/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00
#define HFI1_MMAP_TOKEN_SET(field, val) \
        (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
        (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
        (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
        HFI1_MMAP_TOKEN_SET(TYPE, type) | \
        HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
        HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
        HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
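/*
 * For reference, the resulting 64-bit token layout (a sketch derived
 * from the masks and shifts above, not an authoritative hardware
 * format):
 *
 *  63          32 31  28 27  24 23     16 15    12 11          0
 * +--------------+------+------+---------+--------+-------------+
 * |    MAGIC     |unused| TYPE |  CTXT   |SUBCTXT |   OFFSET    |
 * +--------------+------+------+---------+--------+-------------+
 *
 * User space passes such a token back as the mmap() offset, and
 * hfi1_file_mmap() unpacks it with HFI1_MMAP_TOKEN_GET().
 */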
#define dbg(fmt, ...) \
        pr_info(fmt, ##__VA_ARGS__)
static inline int is_valid_mmap(u64 token)
{
        return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
        struct hfi1_filedata *fd;
        struct hfi1_devdata *dd = container_of(inode->i_cdev,
                                               struct hfi1_devdata,
                                               user_cdev);

        if (!((dd->flags & HFI1_PRESENT) && dd->kregbase1))
                return -EINVAL;

        if (!atomic_inc_not_zero(&dd->user_refcount))
                return -ENXIO;

        /* The real work is performed later in assign_ctxt() */

        fd = kzalloc(sizeof(*fd), GFP_KERNEL);

        if (!fd || init_srcu_struct(&fd->pq_srcu))
                goto nomem;
        spin_lock_init(&fd->pq_rcu_lock);
        spin_lock_init(&fd->tid_lock);
        spin_lock_init(&fd->invalid_lock);
        fd->rec_cpu_num = -1; /* no cpu affinity by default */
        fd->mm = current->mm;
        mmgrab(fd->mm);
        fd->dd = dd;
        kobject_get(&fd->dd->kobj);
        fp->private_data = fd;
        return 0;
nomem:
        kfree(fd);
        fp->private_data = NULL;
        if (atomic_dec_and_test(&dd->user_refcount))
                complete(&dd->user_comp);
        return -ENOMEM;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
                            unsigned long arg)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_user_info uinfo;
        struct hfi1_tid_info tinfo;
        int ret = 0;
        unsigned long addr;
        int uval = 0;
        unsigned long ul_uval = 0;
        u16 uval16 = 0;

        hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
        if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
            cmd != HFI1_IOCTL_GET_VERS &&
            !uctxt)
                return -EINVAL;

        switch (cmd) {
        case HFI1_IOCTL_ASSIGN_CTXT:
                if (uctxt)
                        return -EINVAL;

                if (copy_from_user(&uinfo,
                                   (struct hfi1_user_info __user *)arg,
                                   sizeof(uinfo)))
                        return -EFAULT;

                ret = assign_ctxt(fd, &uinfo);
                break;
        case HFI1_IOCTL_CTXT_INFO:
                ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
                                    sizeof(struct hfi1_ctxt_info));
                break;
        case HFI1_IOCTL_USER_INFO:
                ret = get_base_info(fd, (void __user *)(unsigned long)arg,
                                    sizeof(struct hfi1_base_info));
                break;
        case HFI1_IOCTL_CREDIT_UPD:
                if (uctxt)
                        sc_return_credits(uctxt->sc);
                break;
        case HFI1_IOCTL_TID_UPDATE:
                if (copy_from_user(&tinfo,
                                   (struct hfi1_tid_info __user *)arg,
                                   sizeof(tinfo)))
                        return -EFAULT;

                ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
                if (!ret) {
                        /*
                         * Copy the number of tidlist entries we used
                         * and the length of the buffer we registered.
                         */
                        addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
                        if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
                                         sizeof(tinfo.tidcnt)))
                                return -EFAULT;

                        addr = arg + offsetof(struct hfi1_tid_info, length);
                        if (copy_to_user((void __user *)addr, &tinfo.length,
                                         sizeof(tinfo.length)))
                                ret = -EFAULT;
                }
                break;
        case HFI1_IOCTL_TID_FREE:
                if (copy_from_user(&tinfo,
                                   (struct hfi1_tid_info __user *)arg,
                                   sizeof(tinfo)))
                        return -EFAULT;

                ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
                if (ret)
                        break;
                addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
                if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
                                 sizeof(tinfo.tidcnt)))
                        ret = -EFAULT;
                break;
        case HFI1_IOCTL_TID_INVAL_READ:
                if (copy_from_user(&tinfo,
                                   (struct hfi1_tid_info __user *)arg,
                                   sizeof(tinfo)))
                        return -EFAULT;

                ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
                if (ret)
                        break;
                addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
                if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
                                 sizeof(tinfo.tidcnt)))
                        ret = -EFAULT;
                break;
        case HFI1_IOCTL_RECV_CTRL:
                ret = get_user(uval, (int __user *)arg);
                if (ret != 0)
                        return -EFAULT;
                ret = manage_rcvq(uctxt, fd->subctxt, uval);
                break;

        case HFI1_IOCTL_POLL_TYPE:
                ret = get_user(uval, (int __user *)arg);
                if (ret != 0)
                        return -EFAULT;
                uctxt->poll_type = (typeof(uctxt->poll_type))uval;
                break;

        case HFI1_IOCTL_ACK_EVENT:
                ret = get_user(ul_uval, (unsigned long __user *)arg);
                if (ret != 0)
                        return -EFAULT;
                ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
                break;

        case HFI1_IOCTL_SET_PKEY:
                ret = get_user(uval16, (u16 __user *)arg);
                if (ret != 0)
                        return -EFAULT;
                if (HFI1_CAP_IS_USET(PKEY_CHECK))
                        ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
                else
                        ret = -EPERM;
                break;
        case HFI1_IOCTL_CTXT_RESET: {
                struct send_context *sc;
                struct hfi1_devdata *dd;

                if (!uctxt || !uctxt->dd || !uctxt->sc)
                        return -EINVAL;

                /*
                 * There is no protection here. User level has to
                 * guarantee that no one will be writing to the send
                 * context while it is being re-initialized.
                 * If user level breaks that guarantee, it will break
                 * its own context and no one else's.
                 */
                dd = uctxt->dd;
                sc = uctxt->sc;

                /*
                 * Wait until the interrupt handler has marked the
                 * context as halted or frozen. Report error if we time
                 * out.
                 */
                wait_event_interruptible_timeout(
                        sc->halt_wait, (sc->flags & SCF_HALTED),
                        msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
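                /*
                 * Note that the wait's return value is deliberately not
                 * checked: a signal or timeout simply leaves SCF_HALTED
                 * clear, and the flag test below is what decides failure.
                 */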
                if (!(sc->flags & SCF_HALTED))
                        return -ENOLCK;

                /*
                 * If the send context was halted due to a Freeze,
                 * wait until the device has been "unfrozen" before
                 * resetting the context.
                 */
                if (sc->flags & SCF_FROZEN) {
                        wait_event_interruptible_timeout(
                                dd->event_queue,
                                !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
                                msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
                        if (dd->flags & HFI1_FROZEN)
                                return -ENOLCK;

                        if (dd->flags & HFI1_FORCED_FREEZE)
                                /*
                                 * Don't allow context reset if we are into
                                 * forced freeze
                                 */
                                return -ENODEV;

                        sc_disable(sc);
                        ret = sc_enable(sc);
                        hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
                } else {
                        ret = sc_restart(sc);
                }
                if (!ret)
                        sc_return_credits(sc);
                break;
        }
        case HFI1_IOCTL_GET_VERS:
                uval = HFI1_USER_SWVERSION;
                if (put_user(uval, (int __user *)arg))
                        return -EFAULT;
                break;

        default:
                return -EINVAL;
        }

        return ret;
}
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
        struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
        struct hfi1_user_sdma_pkt_q *pq;
        struct hfi1_user_sdma_comp_q *cq = fd->cq;
        int done = 0, reqs = 0;
        unsigned long dim = from->nr_segs;
        int idx;

        if (!HFI1_CAP_IS_KSET(SDMA))
                return -EINVAL;
        idx = srcu_read_lock(&fd->pq_srcu);
        pq = srcu_dereference(fd->pq, &fd->pq_srcu);
        if (!cq || !pq) {
                srcu_read_unlock(&fd->pq_srcu, idx);
                return -EIO;
        }

        if (!iter_is_iovec(from) || !dim) {
                srcu_read_unlock(&fd->pq_srcu, idx);
                return -EINVAL;
        }

        trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);

        if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
                srcu_read_unlock(&fd->pq_srcu, idx);
                return -ENOSPC;
        }

        while (dim) {
                int ret;
                unsigned long count = 0;

                ret = hfi1_user_sdma_process_request(
                        fd, (struct iovec *)(from->iov + done),
                        dim, &count);
                if (ret) {
                        reqs = ret;
                        break;
                }
                dim -= count;
                done += count;
                reqs++;
        }

        srcu_read_unlock(&fd->pq_srcu, idx);
        return reqs;
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd;
        unsigned long flags;
        u64 token = vma->vm_pgoff << PAGE_SHIFT,
                memaddr = 0;
        void *memvirt = NULL;
        u8 subctxt, mapio = 0, vmf = 0, type;
        ssize_t memlen = 0;
        int ret = 0;
        u16 ctxt;

        if (!is_valid_mmap(token) || !uctxt ||
            !(vma->vm_flags & VM_SHARED)) {
                ret = -EINVAL;
                goto done;
        }
        dd = uctxt->dd;
        ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
        subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
        type = HFI1_MMAP_TOKEN_GET(TYPE, token);
        if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
                ret = -EINVAL;
                goto done;
        }
        flags = vma->vm_flags;

        switch (type) {
        case PIO_BUFS:
        case PIO_BUFS_SOP:
                memaddr = ((dd->physaddr + TXE_PIO_SEND) +
                                /* chip pio base */
                           (uctxt->sc->hw_context * BIT(16))) +
                                /* 64K PIO space / ctxt */
                           (type == PIO_BUFS_SOP ?
                                (TXE_PIO_SIZE / 2) : 0); /* sop? */
                /*
                 * Map only the amount allocated to the context, not the
                 * entire available context's PIO space.
                 */
                memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
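                /*
                 * For scale (assuming the usual 64-byte PIO block, one
                 * block per credit): a context with 128 credits maps
                 * 128 * 64 = 8 KiB, which PAGE_ALIGN leaves at two
                 * 4 KiB pages.
                 */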
                flags &= ~VM_MAYREAD;
                flags |= VM_DONTCOPY | VM_DONTEXPAND;
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                mapio = 1;
                break;
        case PIO_CRED:
                if (flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
                /*
                 * The credit return location for this context could be on the
                 * second or third page allocated for credit returns (if number
                 * of enabled contexts > 64 and 128 respectively).
                 */
                memvirt = dd->cr_base[uctxt->numa_id].va;
                memaddr = virt_to_phys(memvirt) +
                        (((u64)uctxt->sc->hw_free -
                          (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
                memlen = PAGE_SIZE;
                flags &= ~VM_MAYWRITE;
                flags |= VM_DONTCOPY | VM_DONTEXPAND;
                /*
                 * The driver has already allocated memory for credit
                 * returns and programmed it into the chip. Has that
                 * memory been flagged as non-cached?
                 */
                /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
                mapio = 1;
                break;
        case RCV_HDRQ:
                memlen = uctxt->rcvhdrq_size;
                memvirt = uctxt->rcvhdrq;
                break;
        case RCV_EGRBUF: {
                unsigned long addr;
                int i;

                /*
                 * The RcvEgr buffers need to be handled differently
                 * as multiple non-contiguous pages need to be mapped
                 * into the user process.
                 */
                memlen = uctxt->egrbufs.size;
                if ((vma->vm_end - vma->vm_start) != memlen) {
                        dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
                                   (vma->vm_end - vma->vm_start), memlen);
                        ret = -EINVAL;
                        goto done;
                }
                if (vma->vm_flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
                vma->vm_flags &= ~VM_MAYWRITE;
                addr = vma->vm_start;
                for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
                        memlen = uctxt->egrbufs.buffers[i].len;
                        memvirt = uctxt->egrbufs.buffers[i].addr;
                        ret = remap_pfn_range(
                                vma, addr,
                                /*
                                 * virt_to_pfn() does the same, but
                                 * it's not available on x86_64
                                 * when CONFIG_MMU is enabled.
                                 */
                                PFN_DOWN(__pa(memvirt)),
                                memlen,
                                vma->vm_page_prot);
                        if (ret < 0)
                                goto done;
                        addr += memlen;
                }
                ret = 0;
                goto done;
        }
        case UREGS:
                /*
                 * Map only the page that contains this context's user
                 * registers.
                 */
                memaddr = (unsigned long)
                        (dd->physaddr + RXE_PER_CONTEXT_USER)
                        + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
                /*
                 * TidFlow table is on the same page as the rest of the
                 * user registers.
                 */
                memlen = PAGE_SIZE;
                flags |= VM_DONTCOPY | VM_DONTEXPAND;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                mapio = 1;
                break;
        case EVENTS:
                /*
                 * Use the page where this context's flags are. User level
                 * knows where its own bitmap is within the page.
                 */
                memaddr = (unsigned long)(dd->events +
                                          ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
                                           HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
                memlen = PAGE_SIZE;
                /*
                 * v3.7 removes VM_RESERVED but the effect is kept by
                 * using VM_IO.
                 */
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        case STATUS:
                if (flags & VM_WRITE) {
                        ret = -EPERM;
                        goto done;
                }
                memaddr = kvirt_to_phys((void *)dd->status);
                memlen = PAGE_SIZE;
                flags |= VM_IO | VM_DONTEXPAND;
                break;
        case RTAIL:
                if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
                        /*
                         * If the memory allocation failed, the context alloc
                         * also would have failed, so we would never get here
                         */
                        ret = -EINVAL;
                        goto done;
                }
                if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
                        ret = -EPERM;
                        goto done;
                }
                memlen = PAGE_SIZE;
                memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
                flags &= ~VM_MAYWRITE;
                break;
        case SUBCTXT_UREGS:
                memaddr = (u64)uctxt->subctxt_uregbase;
                memlen = PAGE_SIZE;
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        case SUBCTXT_RCV_HDRQ:
                memaddr = (u64)uctxt->subctxt_rcvhdr_base;
                memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        case SUBCTXT_EGRBUF:
                memaddr = (u64)uctxt->subctxt_rcvegrbuf;
                memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
                flags |= VM_IO | VM_DONTEXPAND;
                flags &= ~VM_MAYWRITE;
                vmf = 1;
                break;
        case SDMA_COMP: {
                struct hfi1_user_sdma_comp_q *cq = fd->cq;

                if (!cq) {
                        ret = -EFAULT;
                        goto done;
                }
                memaddr = (u64)cq->comps;
                memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
                flags |= VM_IO | VM_DONTEXPAND;
                vmf = 1;
                break;
        }
        default:
                ret = -EINVAL;
                break;
        }
        if ((vma->vm_end - vma->vm_start) != memlen) {
                hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
                          uctxt->ctxt, fd->subctxt,
                          (vma->vm_end - vma->vm_start), memlen);
                ret = -EINVAL;
                goto done;
        }

        vma->vm_flags = flags;
        hfi1_cdbg(PROC,
                  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
                  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
                  vma->vm_end - vma->vm_start, vma->vm_flags);
        if (vmf) {
                vma->vm_pgoff = PFN_DOWN(memaddr);
                vma->vm_ops = &vm_ops;
                ret = 0;
        } else if (mapio) {
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         PFN_DOWN(memaddr),
                                         memlen,
                                         vma->vm_page_prot);
        } else if (memvirt) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      PFN_DOWN(__pa(memvirt)),
                                      memlen,
                                      vma->vm_page_prot);
        } else {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      PFN_DOWN(memaddr),
                                      memlen,
                                      vma->vm_page_prot);
        }
done:
        return ret;
}
/*
 * Local (non-chip) user memory is not mapped right away but only as it is
 * accessed by the user-level code. For these mappings vm_pgoff was seeded
 * with the kernel-virtual PFN above, so shifting it back recovers the
 * vmalloc address to look up.
 */
static int vma_fault(struct vm_fault *vmf)
{
        struct page *page;

        page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);
        vmf->page = page;

        return 0;
}
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
        struct hfi1_ctxtdata *uctxt;
        unsigned pollflag;

        uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
        if (!uctxt)
                pollflag = POLLERR;
        else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
                pollflag = poll_urgent(fp, pt);
        else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
                pollflag = poll_next(fp, pt);
        else /* invalid */
                pollflag = POLLERR;

        return pollflag;
}
static int hfi1_file_close(struct inode *inode, struct file *fp)
{
        struct hfi1_filedata *fdata = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fdata->uctxt;
        struct hfi1_devdata *dd = container_of(inode->i_cdev,
                                               struct hfi1_devdata,
                                               user_cdev);
        unsigned long flags, *ev;

        fp->private_data = NULL;

        if (!uctxt)
                goto done;

        hfi1_cdbg(PROC, "closing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
        flush_wc();
        /* drain user sdma queue */
        hfi1_user_sdma_free_queues(fdata, uctxt);

        /* release the cpu */
        hfi1_put_proc_affinity(fdata->rec_cpu_num);

        /* clean up rcv side */
        hfi1_user_exp_rcv_free(fdata);

        /*
         * fdata->uctxt is used in the above cleanup. It is not ready to be
         * removed until here.
         */
        fdata->uctxt = NULL;
        hfi1_rcd_put(uctxt);

        /*
         * Clear any left over, unhandled events so the next process that
         * gets this context doesn't get confused.
         */
        ev = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
                           HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
        *ev = 0;
        spin_lock_irqsave(&dd->uctxt_lock, flags);
        __clear_bit(fdata->subctxt, uctxt->in_use_ctxts);
        if (!bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                goto done;
        }
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);
        /*
         * Disable receive context and interrupt available, reset all
         * RcvCtxtCtrl bits to default values.
         */
        hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
                     HFI1_RCVCTRL_TIDFLOW_DIS |
                     HFI1_RCVCTRL_INTRAVAIL_DIS |
                     HFI1_RCVCTRL_TAILUPD_DIS |
                     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
                     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
                     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
        /* Clear the context's J_KEY */
        hfi1_clear_ctxt_jkey(dd, uctxt);
        /*
         * If a send context is allocated, reset context integrity
         * checks to default and disable the send context.
         */
        if (uctxt->sc) {
                sc_disable(uctxt->sc);
                set_pio_integrity(uctxt->sc);
        }

        hfi1_free_ctxt_rcv_groups(uctxt);
        hfi1_clear_ctxt_pkey(dd, uctxt);

        uctxt->event_flags = 0;

        deallocate_ctxt(uctxt);
done:
        mmdrop(fdata->mm);
        kobject_put(&dd->kobj);

        if (atomic_dec_and_test(&dd->user_refcount))
                complete(&dd->user_comp);

        cleanup_srcu_struct(&fdata->pq_srcu);
        kfree(fdata);
        return 0;
}
/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
        struct page *page;
        u64 paddr = 0;

        page = vmalloc_to_page(addr);
        if (page)
                paddr = page_to_pfn(page) << PAGE_SHIFT;

        return paddr;
}
/**
 * complete_subctxt - complete sub-context init
 * @fd: valid filedata pointer
 *
 * Sub-context info can only be set up after the base context
 * has been completed. This is indicated by the clearing of the
 * HFI1_CTXT_BASE_UNINIT bit.
 *
 * Wait for the bit to be cleared, and then complete the sub-context
 * initialization.
 */
static int complete_subctxt(struct hfi1_filedata *fd)
{
        int ret;
        unsigned long flags;

        /*
         * sub-context info can only be set up after the base context
         * has been completed.
         */
        ret = wait_event_interruptible(
                fd->uctxt->wait,
                !test_bit(HFI1_CTXT_BASE_UNINIT, &fd->uctxt->event_flags));

        if (test_bit(HFI1_CTXT_BASE_FAILED, &fd->uctxt->event_flags))
                ret = -ENOMEM;

        /* Finish the sub-context init */
        if (!ret) {
                fd->rec_cpu_num = hfi1_get_proc_affinity(fd->uctxt->numa_id);
                ret = init_user_ctxt(fd, fd->uctxt);
        }

        if (ret) {
                spin_lock_irqsave(&fd->dd->uctxt_lock, flags);
                __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts);
                spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags);
                hfi1_rcd_put(fd->uctxt);
                fd->uctxt = NULL;
        }

        return ret;
}
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
{
        int ret;
        unsigned int swmajor, swminor;
        struct hfi1_ctxtdata *uctxt = NULL;

        swmajor = uinfo->userversion >> 16;
        if (swmajor != HFI1_USER_SWMAJOR)
                return -ENODEV;

        if (uinfo->subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
                return -EINVAL;

        swminor = uinfo->userversion & 0xffff;

        /*
         * Acquire the mutex to protect against multiple creations of what
         * could be a shared base context.
         */
        mutex_lock(&hfi1_mutex);
        /*
         * Get a sub context if available (fd->uctxt will be set).
         * ret < 0 error, 0 no context, 1 sub-context found
         */
        ret = find_sub_ctxt(fd, uinfo);

        /*
         * Allocate a base context if context sharing is not required or a
         * sub context wasn't found.
         */
        if (!ret)
                ret = allocate_ctxt(fd, fd->dd, uinfo, &uctxt);

        mutex_unlock(&hfi1_mutex);

        /* Depending on the context type, finish the appropriate init */
        switch (ret) {
        case 0:
                ret = setup_base_ctxt(fd, uctxt);
                if (ret)
                        deallocate_ctxt(uctxt);
                break;
        case 1:
                ret = complete_subctxt(fd);
                break;
        default:
                break;
        }

        return ret;
}
/**
 * match_ctxt - match context
 * @fd: valid filedata pointer
 * @uinfo: user info to compare base context with
 * @uctxt: context to compare uinfo to.
 *
 * Compare the given context with the given information to see if it
 * can be used for a sub context.
 */
static int match_ctxt(struct hfi1_filedata *fd,
                      const struct hfi1_user_info *uinfo,
                      struct hfi1_ctxtdata *uctxt)
{
        struct hfi1_devdata *dd = fd->dd;
        unsigned long flags;
        u16 subctxt;

        /* Skip dynamically allocated kernel contexts */
        if (uctxt->sc && (uctxt->sc->type == SC_KERNEL))
                return 0;

        /* Skip ctxt if it doesn't match the requested one */
        if (memcmp(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid)) ||
            uctxt->jkey != generate_jkey(current_uid()) ||
            uctxt->subctxt_id != uinfo->subctxt_id ||
            uctxt->subctxt_cnt != uinfo->subctxt_cnt)
                return 0;

        /* Verify the sharing process matches the base */
        if (uctxt->userversion != uinfo->userversion)
                return -EINVAL;

        /* Find an unused sub context */
        spin_lock_irqsave(&dd->uctxt_lock, flags);
        if (bitmap_empty(uctxt->in_use_ctxts, HFI1_MAX_SHARED_CTXTS)) {
                /* context is being closed, do not use */
                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                return 0;
        }

        subctxt = find_first_zero_bit(uctxt->in_use_ctxts,
                                      HFI1_MAX_SHARED_CTXTS);
        if (subctxt >= uctxt->subctxt_cnt) {
                spin_unlock_irqrestore(&dd->uctxt_lock, flags);
                return -EBUSY;
        }

        fd->subctxt = subctxt;
        __set_bit(fd->subctxt, uctxt->in_use_ctxts);
        spin_unlock_irqrestore(&dd->uctxt_lock, flags);

        fd->uctxt = uctxt;
        hfi1_rcd_get(uctxt);

        return 1;
}
/**
 * find_sub_ctxt - find sub-context
 * @fd: valid filedata pointer
 * @uinfo: matching info to use to find a possible context to share.
 *
 * The hfi1_mutex must be held when this function is called. It is
 * necessary to ensure serialized creation of shared contexts.
 *
 * Return:
 *    0      No sub-context found
 *    1      Sub-context found and allocated
 *    errno  EINVAL (incorrect parameters)
 *           EBUSY (all sub-contexts in use)
 */
static int find_sub_ctxt(struct hfi1_filedata *fd,
                         const struct hfi1_user_info *uinfo)
{
        struct hfi1_ctxtdata *uctxt;
        struct hfi1_devdata *dd = fd->dd;
        u16 i;
        int ret;

        if (!uinfo->subctxt_cnt)
                return 0;

        for (i = dd->first_dyn_alloc_ctxt; i < dd->num_rcv_contexts; i++) {
                uctxt = hfi1_rcd_get_by_index(dd, i);
                if (uctxt) {
                        ret = match_ctxt(fd, uinfo, uctxt);
                        hfi1_rcd_put(uctxt);
                        /* a non-zero match result ends the search */
                        if (ret)
                                return ret;
                }
        }

        return 0;
}
static int allocate_ctxt(struct hfi1_filedata *fd, struct hfi1_devdata *dd,
                         struct hfi1_user_info *uinfo,
                         struct hfi1_ctxtdata **rcd)
{
        struct hfi1_ctxtdata *uctxt;
        int ret, numa;

        if (dd->flags & HFI1_FROZEN) {
                /*
                 * Pick an error that is unique from all other errors
                 * that are returned so the user process knows that
                 * it tried to allocate while the SPC was frozen. It
                 * should be able to retry with success in a short
                 * while.
                 */
                return -EIO;
        }

        if (!dd->freectxts)
                return -EBUSY;

        /*
         * If we don't have a NUMA node requested, preference is towards
         * device NUMA node.
         */
        fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
        if (fd->rec_cpu_num != -1)
                numa = cpu_to_node(fd->rec_cpu_num);
        else
                numa = numa_node_id();
        ret = hfi1_create_ctxtdata(dd->pport, numa, &uctxt);
        if (ret < 0) {
                dd_dev_err(dd, "user ctxtdata allocation failed\n");
                return ret;
        }
        hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
                  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
                  uctxt->numa_id);

        /*
         * Allocate and enable a PIO send context.
         */
        uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize, dd->node);
        if (!uctxt->sc) {
                ret = -ENOMEM;
                goto ctxdata_free;
        }
        hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
                  uctxt->sc->hw_context);
        ret = sc_enable(uctxt->sc);
        if (ret)
                goto ctxdata_free;

        /*
         * Setup sub context information if the user-level has requested
         * sub contexts.
         * This has to be done here so the rest of the sub-contexts find the
         * proper base context.
         */
        if (uinfo->subctxt_cnt)
                init_subctxts(uctxt, uinfo);
        uctxt->userversion = uinfo->userversion;
        uctxt->flags = hfi1_cap_mask; /* save current flag state */
        init_waitqueue_head(&uctxt->wait);
        strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
        memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
        uctxt->jkey = generate_jkey(current_uid());
        hfi1_stats.sps_ctxts++;
        /*
         * Disable ASPM when there are open user/PSM contexts to avoid
         * issues with ASPM L1 exit latency
         */
        if (dd->freectxts-- == dd->num_user_contexts)
                aspm_disable_all(dd);

        *rcd = uctxt;

        return 0;

ctxdata_free:
        hfi1_free_ctxt(uctxt);
        return ret;
}
static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt)
{
        mutex_lock(&hfi1_mutex);
        hfi1_stats.sps_ctxts--;
        if (++uctxt->dd->freectxts == uctxt->dd->num_user_contexts)
                aspm_enable_all(uctxt->dd);
        mutex_unlock(&hfi1_mutex);

        hfi1_free_ctxt(uctxt);
}
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
                          const struct hfi1_user_info *uinfo)
{
        uctxt->subctxt_cnt = uinfo->subctxt_cnt;
        uctxt->subctxt_id = uinfo->subctxt_id;
        set_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
        int ret = 0;
        u16 num_subctxts = uctxt->subctxt_cnt;

        uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
        if (!uctxt->subctxt_uregbase)
                return -ENOMEM;

        /* We can take the size of the RcvHdr Queue from the master */
        uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
                                                  num_subctxts);
        if (!uctxt->subctxt_rcvhdr_base) {
                ret = -ENOMEM;
                goto bail_ureg;
        }

        uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
                                                num_subctxts);
        if (!uctxt->subctxt_rcvegrbuf) {
                ret = -ENOMEM;
                goto bail_rhdr;
        }

        return 0;

bail_rhdr:
        vfree(uctxt->subctxt_rcvhdr_base);
        uctxt->subctxt_rcvhdr_base = NULL;
bail_ureg:
        vfree(uctxt->subctxt_uregbase);
        uctxt->subctxt_uregbase = NULL;

        return ret;
}
static void user_init(struct hfi1_ctxtdata *uctxt)
{
        unsigned int rcvctrl_ops = 0;

        /* initialize poll variables... */
        uctxt->urgent = 0;
        uctxt->urgent_poll = 0;

        /*
         * Now enable the ctxt for receive.
         * For chips that DMA the tail register to memory when it
         * changes (and when the update bit transitions from 0 to 1),
         * we turn it off and then back on.
         * This will (very briefly) affect any other open ctxts, but the
         * duration is very short, and therefore isn't an issue. We
         * explicitly set the in-memory tail copy to 0 beforehand, so we
         * don't have to wait to be sure the DMA update has happened
         * (chip resets head/tail to 0 on transition to enable).
         */
        if (uctxt->rcvhdrtail_kvaddr)
                clear_rcvhdrtail(uctxt);

        /* Setup J_KEY before enabling the context */
        hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);

        rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
        if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
                rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
        /*
         * Ignore the bit in the flags for now until proper
         * support for multiple packets per rcv array entry is
         * added.
         */
        if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
                rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
        if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
                rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
        if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
                rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
        /*
         * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
         * We can't rely on the correct value to be set from prior
         * uses of the chip or ctxt. Therefore, add the rcvctrl op
         * for both cases.
         */
        if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
                rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
        else
                rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
        hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}
static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
                         __u32 len)
{
        struct hfi1_ctxt_info cinfo;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        int ret = 0;

        memset(&cinfo, 0, sizeof(cinfo));
        cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
                                HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
                        HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
                        HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
        /* adjust flag if this fd is not able to cache */
        if (!fd->handler)
                cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */

        cinfo.num_active = hfi1_count_active_units();
        cinfo.unit = uctxt->dd->unit;
        cinfo.ctxt = uctxt->ctxt;
        cinfo.subctxt = fd->subctxt;
        cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
                                uctxt->dd->rcv_entries.group_size) +
                uctxt->expected_count;
        cinfo.credits = uctxt->sc->credits;
        cinfo.numa_node = uctxt->numa_id;
        cinfo.rec_cpu = fd->rec_cpu_num;
        cinfo.send_ctxt = uctxt->sc->hw_context;

        cinfo.egrtids = uctxt->egrbufs.alloced;
        cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
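        /* rcvhdrqentsize is stored in DWORDs; the shift converts to bytes */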
        cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
        cinfo.sdma_ring_size = fd->cq->nentries;
        cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

        trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
        if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
                ret = -EFAULT;

        return ret;
}
static int init_user_ctxt(struct hfi1_filedata *fd,
                          struct hfi1_ctxtdata *uctxt)
{
        int ret;

        ret = hfi1_user_sdma_alloc_queues(uctxt, fd);
        if (ret)
                return ret;

        ret = hfi1_user_exp_rcv_init(fd, uctxt);
        if (ret)
                hfi1_user_sdma_free_queues(fd, uctxt);

        return ret;
}
static int setup_base_ctxt(struct hfi1_filedata *fd,
                           struct hfi1_ctxtdata *uctxt)
{
        struct hfi1_devdata *dd = uctxt->dd;
        int ret = 0;

        hfi1_init_ctxt(uctxt->sc);

        /* Now allocate the RcvHdr queue and eager buffers. */
        ret = hfi1_create_rcvhdrq(dd, uctxt);
        if (ret)
                goto done;

        ret = hfi1_setup_eagerbufs(uctxt);
        if (ret)
                goto done;

        /* If sub-contexts are enabled, do the appropriate setup */
        if (uctxt->subctxt_cnt)
                ret = setup_subctxt(uctxt);
        if (ret)
                goto done;

        ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
        if (ret)
                goto done;

        ret = init_user_ctxt(fd, uctxt);
        if (ret) {
                hfi1_free_ctxt_rcv_groups(uctxt);
                goto done;
        }

        user_init(uctxt);

        /* Now that the context is set up, the fd can get a reference. */
        fd->uctxt = uctxt;
        hfi1_rcd_get(uctxt);

done:
        if (uctxt->subctxt_cnt) {
                /*
                 * On error, set the failed bit so sub-contexts will clean up
                 * correctly.
                 */
                if (ret)
                        set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);

                /*
                 * Base context is done (successfully or not), notify anybody
                 * using a sub-context that is waiting for this completion.
                 */
                clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
                wake_up(&uctxt->wait);
        }

        return ret;
}
static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
                         __u32 len)
{
        struct hfi1_base_info binfo;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        ssize_t sz;
        unsigned offset;
        int ret = 0;

        trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);

        memset(&binfo, 0, sizeof(binfo));
        binfo.hw_version = dd->revision;
        binfo.sw_version = HFI1_KERN_SWVERSION;
        binfo.bthqp = kdeth_qp;
        binfo.jkey = uctxt->jkey;
        /*
         * If more than 64 contexts are enabled the allocated credit
         * return will span two or three contiguous pages. Since we only
         * map the page containing the context's credit return address,
         * we need to calculate the offset in the proper page.
         */
        offset = ((u64)uctxt->sc->hw_free -
                  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
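        /*
         * Worked example (assuming 4 KiB pages): a hw_free slot 4160
         * bytes into the credit-return allocation lands on the second
         * page, and 4160 % 4096 leaves offset = 64 within that page.
         */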
        binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
                                                fd->subctxt, offset);
        binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
                                            fd->subctxt,
                                            uctxt->sc->base_addr);
        binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
                                                uctxt->ctxt,
                                                fd->subctxt,
                                                uctxt->sc->base_addr);
        binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
                                               fd->subctxt,
                                               uctxt->rcvhdrq);
        binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
                                               fd->subctxt,
                                               uctxt->egrbufs.rcvtids[0].dma);
        binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
                                                  fd->subctxt, 0);
        /*
         * user regs are at
         * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
         */
        binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
                                             fd->subctxt, 0);
        offset = offset_in_page((((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
                                  HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
                                sizeof(*dd->events));
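        /*
         * Worked example (assuming HFI1_MAX_SHARED_CTXTS is 8 and 8-byte
         * event words): context index 5, sub-context 2 selects event word
         * 5 * 8 + 2 = 42, i.e. byte offset 336 within the events page.
         */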
        binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
                                               fd->subctxt,
                                               offset);
        binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
                                               fd->subctxt,
                                               dd->status);
        if (HFI1_CAP_IS_USET(DMA_RTAIL))
                binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
                                                        fd->subctxt, 0);
        if (uctxt->subctxt_cnt) {
                binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
                                                         uctxt->ctxt,
                                                         fd->subctxt, 0);
                binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
                                                          uctxt->ctxt,
                                                          fd->subctxt, 0);
                binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
                                                          uctxt->ctxt,
                                                          fd->subctxt, 0);
        }

        sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
        if (copy_to_user(ubase, &binfo, sz))
                ret = -EFAULT;

        return ret;
}
static unsigned int poll_urgent(struct file *fp,
                                struct poll_table_struct *pt)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned pollflag;

        poll_wait(fp, &uctxt->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (uctxt->urgent != uctxt->urgent_poll) {
                pollflag = POLLIN | POLLRDNORM;
                uctxt->urgent_poll = uctxt->urgent;
        } else {
                pollflag = 0;
                set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
        }
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}
static unsigned int poll_next(struct file *fp,
                              struct poll_table_struct *pt)
{
        struct hfi1_filedata *fd = fp->private_data;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned pollflag;

        poll_wait(fp, &uctxt->wait, pt);

        spin_lock_irq(&dd->uctxt_lock);
        if (hdrqempty(uctxt)) {
                set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
                hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt);
                pollflag = 0;
        } else {
                pollflag = POLLIN | POLLRDNORM;
        }
        spin_unlock_irq(&dd->uctxt_lock);

        return pollflag;
}
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
        struct hfi1_ctxtdata *uctxt;
        struct hfi1_devdata *dd = ppd->dd;
        u16 ctxt;

        if (!dd->events)
                return -EINVAL;

        for (ctxt = dd->first_dyn_alloc_ctxt; ctxt < dd->num_rcv_contexts;
             ctxt++) {
                uctxt = hfi1_rcd_get_by_index(dd, ctxt);
                if (uctxt) {
                        unsigned long *evs = dd->events +
                                (uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
                                HFI1_MAX_SHARED_CTXTS;
                        int i;
                        /*
                         * subctxt_cnt is 0 if not shared, so do base
                         * separately, first, then remaining subctxt, if any
                         */
                        set_bit(evtbit, evs);
                        for (i = 1; i < uctxt->subctxt_cnt; i++)
                                set_bit(evtbit, evs + i);
                        hfi1_rcd_put(uctxt);
                }
        }

        return 0;
}
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
                       int start_stop)
{
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned int rcvctrl_op;

        if (subctxt)
                goto bail;
        /* atomically clear receive enable ctxt. */
        if (start_stop) {
                /*
                 * On enable, force in-memory copy of the tail register to
                 * 0, so that protocol code doesn't have to worry about
                 * whether or not the chip has yet updated the in-memory
                 * copy or not on return from the system call. The chip
                 * always resets its tail register back to 0 on a
                 * transition from disabled to enabled.
                 */
                if (uctxt->rcvhdrtail_kvaddr)
                        clear_rcvhdrtail(uctxt);
                rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
        } else {
                rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
        }
        hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
        /* always; new head should be equal to new tail; see above */
bail:
        return 0;
}
/*
 * Clear the event notifier events for this context.
 * User process then performs actions appropriate to bit having been
 * set, if desired, and checks again in future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
                          unsigned long events)
{
        int i;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned long *evs;

        if (!dd->events)
                return 0;

        evs = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
                            HFI1_MAX_SHARED_CTXTS) + subctxt;
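        /*
         * dd->events is a flat array laid out as [context][sub-context];
         * evs now points at this sub-context's event word.
         */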
        for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
                if (!test_bit(i, &events))
                        continue;
                clear_bit(i, evs);
        }
        return 0;
}
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
{
        int ret = -ENOENT, i, intable = 0;
        struct hfi1_pportdata *ppd = uctxt->ppd;
        struct hfi1_devdata *dd = uctxt->dd;

        if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
                ret = -EINVAL;
                goto done;
        }

        for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
                if (pkey == ppd->pkeys[i]) {
                        intable = 1;
                        break;
                }

        if (intable)
                ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
done:
        return ret;
}
static void user_remove(struct hfi1_devdata *dd)
{
        hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}
static int user_add(struct hfi1_devdata *dd)
{
        char name[10];
        int ret;

        snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
        ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
                             &dd->user_cdev, &dd->user_device,
                             true, &dd->kobj);
        if (ret)
                user_remove(dd);

        return ret;
}
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
        return user_add(dd);
}

/*
 * Remove per-unit files in /dev
 * void, core kernel returns no errors for this stuff
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
        user_remove(dd);
}