/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/vmalloc.h>

#include "user_sdma.h"
#include "user_exp_rcv.h"

#undef pr_fmt
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
/*
 * File operation functions
 */
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);

static u64 kvirt_to_phys(void *);
static int assign_ctxt(struct file *, struct hfi1_user_info *);
static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
static int user_init(struct file *);
static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
static int get_user_context(struct file *, struct hfi1_user_info *, int);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
			 struct hfi1_user_info *);
static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
static unsigned int poll_next(struct file *, struct poll_table_struct *);
static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg);
static const struct file_operations hfi1_file_ops = {
	.owner = THIS_MODULE,
	.write_iter = hfi1_write_iter,
	.open = hfi1_file_open,
	.release = hfi1_file_close,
	.unlocked_ioctl = hfi1_file_ioctl,
	.poll = hfi1_poll,
	.mmap = hfi1_file_mmap,
	.llseek = noop_llseek,
};
static struct vm_operations_struct vm_ops = {
	.fault = vma_fault,
};
/*
 * Types of memories mapped into user processes' space
 */
/*
 * Masks and offsets defining the mmap tokens
 */
#define HFI1_MMAP_OFFSET_MASK   0xfffULL
#define HFI1_MMAP_OFFSET_SHIFT  0
#define HFI1_MMAP_SUBCTXT_MASK  0xfULL
#define HFI1_MMAP_SUBCTXT_SHIFT 12
#define HFI1_MMAP_CTXT_MASK     0xffULL
#define HFI1_MMAP_CTXT_SHIFT    16
#define HFI1_MMAP_TYPE_MASK     0xfULL
#define HFI1_MMAP_TYPE_SHIFT    24
#define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
#define HFI1_MMAP_MAGIC_SHIFT   32

#define HFI1_MMAP_MAGIC         0xdabbad00
#define HFI1_MMAP_TOKEN_SET(field, val) \
	(((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
#define HFI1_MMAP_TOKEN_GET(field, token) \
	(((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
#define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
	(HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
	HFI1_MMAP_TOKEN_SET(TYPE, type) | \
	HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
	HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
	HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
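
/*
 * Editor's illustration (not part of the original driver): a minimal
 * sketch of how a token built with HFI1_MMAP_TOKEN() decomposes again
 * through HFI1_MMAP_TOKEN_GET(). The helper name is hypothetical.
 */
static inline void hfi1_mmap_token_decode_sketch(u64 token)
{
	u64 magic = HFI1_MMAP_TOKEN_GET(MAGIC, token);     /* bits 32-63 */
	u64 type = HFI1_MMAP_TOKEN_GET(TYPE, token);       /* bits 24-27 */
	u64 ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);       /* bits 16-23 */
	u64 subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token); /* bits 12-15 */
	u64 offset = HFI1_MMAP_TOKEN_GET(OFFSET, token);   /* bits 0-11 */

	pr_info("magic=%#llx type=%llu ctxt=%llu subctxt=%llu offset=%#llx\n",
		magic, type, ctxt, subctxt, offset);
}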
#define dbg(fmt, ...) \
	pr_info(fmt, ##__VA_ARGS__)
static inline int is_valid_mmap(u64 token)
{
	return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
}
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);

	if (!atomic_inc_not_zero(&dd->user_refcount))
		return -ENXIO;

	/* Just take a ref now. Not all opens result in a context assign */
	kobject_get(&dd->kobj);

	/* The real work is performed later in assign_ctxt() */

	fd = kzalloc(sizeof(*fd), GFP_KERNEL);

	if (fd) {
		fd->rec_cpu_num = -1; /* no cpu affinity by default */
		fd->mm = current->mm;
		atomic_inc(&fd->mm->mm_count);
		fp->private_data = fd;
	} else {
		fp->private_data = NULL;

		if (atomic_dec_and_test(&dd->user_refcount))
			complete(&dd->user_comp);

		kfree(fd);

		return -ENOMEM;
	}

	return 0;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
			    unsigned long arg)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_info uinfo;
	struct hfi1_tid_info tinfo;
	int ret = 0;
	unsigned long addr;
	int uval = 0;
	unsigned long ul_uval = 0;
	u16 uval16 = 0;

	hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
	if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
	    cmd != HFI1_IOCTL_GET_VERS &&
	    !uctxt)
		return -EINVAL;

	switch (cmd) {
	case HFI1_IOCTL_ASSIGN_CTXT:
		if (uctxt)
			return -EINVAL;

		if (copy_from_user(&uinfo,
				   (struct hfi1_user_info __user *)arg,
				   sizeof(uinfo)))
			return -EFAULT;

		ret = assign_ctxt(fp, &uinfo);
		if (ret < 0)
			return ret;
		ret = setup_ctxt(fp);
		if (ret)
			return ret;
		ret = user_init(fp);
		break;
	case HFI1_IOCTL_CTXT_INFO:
		ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
				    sizeof(struct hfi1_ctxt_info));
		break;
	case HFI1_IOCTL_USER_INFO:
		ret = get_base_info(fp, (void __user *)(unsigned long)arg,
				    sizeof(struct hfi1_base_info));
		break;
	case HFI1_IOCTL_CREDIT_UPD:
		if (uctxt && uctxt->sc)
			sc_return_credits(uctxt->sc);
		break;
	case HFI1_IOCTL_TID_UPDATE:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
		if (!ret) {
			/*
			 * Copy the number of tidlist entries we used
			 * and the length of the buffer we registered.
			 * These fields are adjacent in the structure so
			 * we can copy them at the same time.
			 */
			addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
			if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
					 sizeof(tinfo.tidcnt) +
					 sizeof(tinfo.length)))
				ret = -EFAULT;
		}
		break;
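		/*
		 * Editor's note (hedged sketch, not in the original code):
		 * the single copy above relies on tidcnt and length being
		 * adjacent in struct hfi1_tid_info. That layout assumption
		 * could be pinned down at compile time with something like:
		 *
		 *	BUILD_BUG_ON(offsetof(struct hfi1_tid_info, length) !=
		 *		     offsetof(struct hfi1_tid_info, tidcnt) +
		 *		     sizeof(((struct hfi1_tid_info *)0)->tidcnt));
		 */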
	case HFI1_IOCTL_TID_FREE:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_IOCTL_TID_INVAL_READ:
		if (copy_from_user(&tinfo,
				   (struct hfi1_tid_info __user *)arg,
				   sizeof(tinfo)))
			return -EFAULT;

		ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
		if (ret)
			break;
		addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
		if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
				 sizeof(tinfo.tidcnt)))
			ret = -EFAULT;
		break;
	case HFI1_IOCTL_RECV_CTRL:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = manage_rcvq(uctxt, fd->subctxt, uval);
		break;
	case HFI1_IOCTL_POLL_TYPE:
		ret = get_user(uval, (int __user *)arg);
		if (ret != 0)
			return -EFAULT;
		uctxt->poll_type = (typeof(uctxt->poll_type))uval;
		break;
	case HFI1_IOCTL_ACK_EVENT:
		ret = get_user(ul_uval, (unsigned long __user *)arg);
		if (ret != 0)
			return -EFAULT;
		ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
		break;
	case HFI1_IOCTL_SET_PKEY:
		ret = get_user(uval16, (u16 __user *)arg);
		if (ret != 0)
			return -EFAULT;
		if (HFI1_CAP_IS_USET(PKEY_CHECK))
			ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
		else
			ret = -EPERM;
		break;
	case HFI1_IOCTL_CTXT_RESET: {
		struct send_context *sc;
		struct hfi1_devdata *dd;

		if (!uctxt || !uctxt->dd || !uctxt->sc)
			return -EINVAL;

		/*
		 * There is no protection here. User level has to
		 * guarantee that no one will be writing to the send
		 * context while it is being re-initialized.
		 * If user level breaks that guarantee, it will break
		 * its own context and no one else's.
		 */
		dd = uctxt->dd;
		sc = uctxt->sc;
		/*
		 * Wait until the interrupt handler has marked the
		 * context as halted or frozen. Report error if we time
		 * out.
		 */
		wait_event_interruptible_timeout(
			sc->halt_wait, (sc->flags & SCF_HALTED),
			msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
		if (!(sc->flags & SCF_HALTED))
			return -ENOLCK;

		/*
		 * If the send context was halted due to a Freeze,
		 * wait until the device has been "unfrozen" before
		 * resetting the context.
		 */
		if (sc->flags & SCF_FROZEN) {
			wait_event_interruptible_timeout(
				dd->event_queue,
				!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
				msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
			if (dd->flags & HFI1_FROZEN)
				return -ENOLCK;

			if (dd->flags & HFI1_FORCED_FREEZE)
				/*
				 * Don't allow context reset if we are into
				 * forced freeze
				 */
				return -ENODEV;

			sc_disable(sc);
			ret = sc_enable(sc);
			hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
				     uctxt->ctxt);
		} else {
			ret = sc_restart(sc);
		}
		if (!ret)
			sc_return_credits(sc);
		break;
	}
	case HFI1_IOCTL_GET_VERS:
		uval = HFI1_USER_SWVERSION;
		if (put_user(uval, (int __user *)arg))
			return -EFAULT;
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
	struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	int done = 0, reqs = 0;
	unsigned long dim = from->nr_segs;

	if (!cq || !pq)
		return -EIO;

	if (!iter_is_iovec(from) || !dim)
		return -EINVAL;

	hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
		  fd->uctxt->ctxt, fd->subctxt, dim);

	if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
		return -ENOSPC;

	while (dim) {
		int ret;
		unsigned long count = 0;

		ret = hfi1_user_sdma_process_request(
			kiocb->ki_filp, (struct iovec *)(from->iov + done),
			dim, &count);
		if (ret) {
			reqs = ret;
			break;
		}
		dim -= count;
		done += count;
		reqs++;
	}

	return reqs;
}
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd;
	unsigned long flags;
	u64 token = vma->vm_pgoff << PAGE_SHIFT,
		memaddr = 0;
	void *memvirt = NULL;
	u8 subctxt, mapio = 0, vmf = 0, type;
	ssize_t memlen = 0;
	int ret = 0;
	u16 ctxt;

	if (!is_valid_mmap(token) || !uctxt ||
	    !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto done;
	}
	dd = uctxt->dd;
	ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
	subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
	type = HFI1_MMAP_TOKEN_GET(TYPE, token);
	if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
		ret = -EINVAL;
		goto done;
	}

	flags = vma->vm_flags;

	switch (type) {
	case PIO_BUFS:
	case PIO_BUFS_SOP:
		memaddr = ((dd->physaddr + TXE_PIO_SEND) +
				/* chip pio base */
			   (uctxt->sc->hw_context * BIT(16))) +
				/* 64K PIO space / ctxt */
			   (type == PIO_BUFS_SOP ?
				(TXE_PIO_SIZE / 2) : 0); /* sop? */
		/*
		 * Map only the amount allocated to the context, not the
		 * entire available context's PIO space.
		 */
		memlen = PAGE_ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE);
		flags &= ~VM_MAYREAD;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		mapio = 1;
		break;
	case PIO_CRED:
		if (flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		/*
		 * The credit return location for this context could be on the
		 * second or third page allocated for credit returns (if number
		 * of enabled contexts > 64 and 128 respectively).
		 */
		memvirt = dd->cr_base[uctxt->numa_id].va;
		memaddr = virt_to_phys(memvirt) +
			(((u64)uctxt->sc->hw_free -
			  (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
		memlen = PAGE_SIZE;
		flags &= ~VM_MAYWRITE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		/*
		 * The driver has already allocated memory for credit
		 * returns and programmed it into the chip. Has that
		 * memory been flagged as non-cached?
		 */
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		mapio = 1;
		break;
	case RCV_HDRQ:
		memlen = uctxt->rcvhdrq_size;
		memvirt = uctxt->rcvhdrq;
		break;
	case RCV_EGRBUF: {
		unsigned long addr;
		int i;
		/*
		 * The RcvEgr buffers need to be handled differently
		 * as multiple non-contiguous pages need to be mapped
		 * into the user process.
		 */
		memlen = uctxt->egrbufs.size;
		if ((vma->vm_end - vma->vm_start) != memlen) {
			dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
				   (vma->vm_end - vma->vm_start), memlen);
			ret = -EINVAL;
			goto done;
		}
		if (vma->vm_flags & VM_WRITE) {
			ret = -EPERM;
			goto done;
		}
		vma->vm_flags &= ~VM_MAYWRITE;
		addr = vma->vm_start;
		for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
			memlen = uctxt->egrbufs.buffers[i].len;
			memvirt = uctxt->egrbufs.buffers[i].addr;
			ret = remap_pfn_range(
				vma, addr,
				/*
				 * virt_to_pfn() does the same, but
				 * it's not available on x86_64
				 * when CONFIG_MMU is enabled.
				 */
				PFN_DOWN(__pa(memvirt)),
				memlen,
				vma->vm_page_prot);
			if (ret < 0)
				goto done;
			addr += memlen;
		}
		ret = 0;
		goto done;
	}
	case UREGS:
		/*
		 * Map only the page that contains this context's user
		 * registers.
		 */
		memaddr = (unsigned long)
			(dd->physaddr + RXE_PER_CONTEXT_USER)
			+ (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
		/*
		 * TidFlow table is on the same page as the rest of the
		 * user registers.
		 */
		memlen = PAGE_SIZE;
		flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		mapio = 1;
		break;
	case EVENTS:
		/*
		 * Use the page where this context's flags are. User level
		 * knows where its own bitmap is within the page.
		 */
		memaddr = (unsigned long)(dd->events +
					  ((uctxt->ctxt - dd->first_user_ctxt) *
					   HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
		memlen = PAGE_SIZE;
		/*
		 * v3.7 removes VM_RESERVED but the effect is kept by
		 * using VM_IO.
		 */
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case STATUS:
		memaddr = kvirt_to_phys((void *)dd->status);
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		break;
	case RTAIL:
		if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
			/*
			 * If the memory allocation failed, the context alloc
			 * also would have failed, so we would never get here
			 */
			ret = -EINVAL;
			goto done;
		}
		if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
			ret = -EPERM;
			goto done;
		}
		memlen = PAGE_SIZE;
		memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
		flags &= ~VM_MAYWRITE;
		break;
	case SUBCTXT_UREGS:
		memaddr = (u64)uctxt->subctxt_uregbase;
		memlen = PAGE_SIZE;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_RCV_HDRQ:
		memaddr = (u64)uctxt->subctxt_rcvhdr_base;
		memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	case SUBCTXT_EGRBUF:
		memaddr = (u64)uctxt->subctxt_rcvegrbuf;
		memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
		flags |= VM_IO | VM_DONTEXPAND;
		flags &= ~VM_MAYWRITE;
		vmf = 1;
		break;
	case SDMA_COMP: {
		struct hfi1_user_sdma_comp_q *cq = fd->cq;

		if (!cq) {
			ret = -EFAULT;
			goto done;
		}
		memaddr = (u64)cq->comps;
		memlen = PAGE_ALIGN(sizeof(*cq->comps) * cq->nentries);
		flags |= VM_IO | VM_DONTEXPAND;
		vmf = 1;
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	if ((vma->vm_end - vma->vm_start) != memlen) {
		hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
			  uctxt->ctxt, fd->subctxt,
			  (vma->vm_end - vma->vm_start), memlen);
		ret = -EINVAL;
		goto done;
	}

	vma->vm_flags = flags;
	hfi1_cdbg(PROC,
		  "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
		  ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
		  vma->vm_end - vma->vm_start, vma->vm_flags);
	if (vmf) {
		vma->vm_pgoff = PFN_DOWN(memaddr);
		vma->vm_ops = &vm_ops;
		ret = 0;
	} else if (mapio) {
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 PFN_DOWN(memaddr),
					 memlen,
					 vma->vm_page_prot);
	} else if (memvirt) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(__pa(memvirt)),
				      memlen,
				      vma->vm_page_prot);
	} else {
		ret = remap_pfn_range(vma, vma->vm_start,
				      PFN_DOWN(memaddr),
				      memlen,
				      vma->vm_page_prot);
	}
done:
	return ret;
}
/*
 * Local (non-chip) user memory is not mapped right away but only as it is
 * accessed by the user-level code.
 */
static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}
static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct hfi1_ctxtdata *uctxt;
	unsigned pollflag;

	uctxt = ((struct hfi1_filedata *)fp->private_data)->uctxt;
	if (!uctxt)
		pollflag = POLLERR;
	else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
		pollflag = poll_urgent(fp, pt);
	else if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
		pollflag = poll_next(fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}
static int hfi1_file_close(struct inode *inode, struct file *fp)
{
	struct hfi1_filedata *fdata = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct hfi1_devdata *dd = container_of(inode->i_cdev,
					       struct hfi1_devdata,
					       user_cdev);
	unsigned long flags, *ev;

	fp->private_data = NULL;

	if (!uctxt)
		goto done;

	hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
	mutex_lock(&hfi1_mutex);

	flush_wc();
	/* drain user sdma queue */
	hfi1_user_sdma_free_queues(fdata);

	/* release the cpu */
	hfi1_put_proc_affinity(fdata->rec_cpu_num);

	/* clean up rcv side */
	hfi1_user_exp_rcv_free(fdata);

	/*
	 * Clear any leftover, unhandled events so the next process that
	 * gets this context doesn't get confused.
	 */
	ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			   HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
	*ev = 0;

	if (--uctxt->cnt) {
		uctxt->active_slaves &= ~(1 << fdata->subctxt);
		mutex_unlock(&hfi1_mutex);
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_TAILUPD_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
	/* Clear the context's J_KEY */
	hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
	/*
	 * Reset context integrity checks to default.
	 * (writes to CSRs probably belong in chip.c)
	 */
	write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
			hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
	sc_disable(uctxt->sc);
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	dd->rcd[uctxt->ctxt] = NULL;

	hfi1_user_exp_rcv_grp_free(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);

	uctxt->rcvwait_to = 0;
	uctxt->piowait_to = 0;
	uctxt->rcvnowait = 0;
	uctxt->pionowait = 0;
	uctxt->event_flags = 0;

	hfi1_stats.sps_ctxts--;
	if (++dd->freectxts == dd->num_user_contexts)
		aspm_enable_all(dd);
	mutex_unlock(&hfi1_mutex);
	hfi1_free_ctxtdata(dd, uctxt);

done:
	mmdrop(fdata->mm);
	kobject_put(&dd->kobj);

	if (atomic_dec_and_test(&dd->user_refcount))
		complete(&dd->user_comp);

	kfree(fdata);
	return 0;
}
/*
 * Convert kernel *virtual* addresses to physical addresses.
 * This is used for vmalloc'ed addresses.
 */
static u64 kvirt_to_phys(void *addr)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(addr);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
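
/*
 * Editor's illustration (hypothetical usage, names taken from this file):
 * virt_to_phys() is only valid for the kernel's linear mapping, so the
 * vmalloc'ed status page has to go through vmalloc_to_page() instead,
 * e.g. as the STATUS case of hfi1_file_mmap() does:
 *
 *	memaddr = kvirt_to_phys((void *)dd->status);
 *	vma->vm_pgoff = PFN_DOWN(memaddr);
 */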
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
	int i_minor, ret = 0;
	unsigned int swmajor, swminor;

	swmajor = uinfo->userversion >> 16;
	if (swmajor != HFI1_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->userversion & 0xffff;

	mutex_lock(&hfi1_mutex);
	/* First, let's check if we need to set up a shared context */
	if (uinfo->subctxt_cnt) {
		struct hfi1_filedata *fd = fp->private_data;

		ret = find_shared_ctxt(fp, uinfo);
		if (ret < 0)
			goto done_unlock;
		if (ret) {
			fd->rec_cpu_num =
				hfi1_get_proc_affinity(fd->uctxt->numa_id);
		}
	}

	/*
	 * We execute the following block if we couldn't find a
	 * shared context or if context sharing is not required.
	 */
	if (!ret) {
		i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
		ret = get_user_context(fp, uinfo, i_minor);
	}
done_unlock:
	mutex_unlock(&hfi1_mutex);
done:
	return ret;
}
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
			    int devno)
{
	struct hfi1_devdata *dd = NULL;
	int devmax, npresent, nup;

	devmax = hfi1_count_units(&npresent, &nup);
	if (!npresent)
		return -ENXIO;

	if (!nup)
		return -ENETDOWN;

	dd = hfi1_lookup(devno);
	if (!dd)
		return -ENODEV;
	else if (!dd->freectxts)
		return -EBUSY;

	return allocate_ctxt(fp, dd, uinfo);
}
static int find_shared_ctxt(struct file *fp,
			    const struct hfi1_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;
	struct hfi1_filedata *fd = fp->private_data;

	devmax = hfi1_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct hfi1_devdata *dd = hfi1_lookup(ndev);

		if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
			struct hfi1_ctxtdata *uctxt = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!uctxt || !uctxt->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (memcmp(uctxt->uuid, uinfo->uuid,
				   sizeof(uctxt->uuid)) ||
			    uctxt->jkey != generate_jkey(current_uid()) ||
			    uctxt->subctxt_id != uinfo->subctxt_id ||
			    uctxt->subctxt_cnt != uinfo->subctxt_cnt)
				continue;

			/* Verify the sharing process matches the master */
			if (uctxt->userversion != uinfo->userversion ||
			    uctxt->cnt >= uctxt->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			fd->uctxt = uctxt;
			fd->subctxt = uctxt->cnt++;
			uctxt->active_slaves |= 1 << fd->subctxt;
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}
static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
			 struct hfi1_user_info *uinfo)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt;
	unsigned ctxt;
	int ret, numa;

	if (dd->flags & HFI1_FROZEN) {
		/*
		 * Pick an error that is unique from all other errors
		 * that are returned so the user process knows that
		 * it tried to allocate while the SPC was frozen. It
		 * should be able to retry with success in a short
		 * while.
		 */
		return -EIO;
	}

	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
		if (!dd->rcd[ctxt])
			break;

	if (ctxt == dd->num_rcv_contexts)
		return -EBUSY;

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node.
	 */
	fd->rec_cpu_num = hfi1_get_proc_affinity(dd->node);
	if (fd->rec_cpu_num != -1)
		numa = cpu_to_node(fd->rec_cpu_num);
	else
		numa = numa_node_id();
	uctxt = hfi1_create_ctxtdata(dd->pport, ctxt, numa);
	if (!uctxt) {
		dd_dev_err(dd,
			   "Unable to allocate ctxtdata memory, failing open\n");
		return -ENOMEM;
	}
	hfi1_cdbg(PROC, "[%u:%u] pid %u assigned to CPU %d (NUMA %u)",
		  uctxt->ctxt, fd->subctxt, current->pid, fd->rec_cpu_num,
		  uctxt->numa_id);

	/*
	 * Allocate and enable a PIO send context.
	 */
	uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
			     uctxt->dd->node);
	if (!uctxt->sc) {
		ret = -ENOMEM;
		goto ctxdata_free;
	}
	hfi1_cdbg(PROC, "allocated send context %u(%u)\n", uctxt->sc->sw_index,
		  uctxt->sc->hw_context);
	ret = sc_enable(uctxt->sc);
	if (ret)
		goto ctxdata_free;

	/*
	 * Set up shared context resources if the user-level has requested
	 * shared contexts and this is the 'master' process.
	 * This has to be done here so the rest of the sub-contexts find the
	 * proper master.
	 */
	if (uinfo->subctxt_cnt && !fd->subctxt) {
		ret = init_subctxts(uctxt, uinfo);
		/*
		 * On error, we don't need to disable and de-allocate the
		 * send context because it will be done during file close
		 */
		if (ret)
			goto ctxdata_free;
	}
	uctxt->userversion = uinfo->userversion;
	uctxt->flags = hfi1_cap_mask; /* save current flag state */
	init_waitqueue_head(&uctxt->wait);
	strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
	memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
	uctxt->jkey = generate_jkey(current_uid());
	INIT_LIST_HEAD(&uctxt->sdma_queues);
	spin_lock_init(&uctxt->sdma_qlock);
	hfi1_stats.sps_ctxts++;
	/*
	 * Disable ASPM when there are open user/PSM contexts to avoid
	 * issues with ASPM L1 exit latency
	 */
	if (dd->freectxts-- == dd->num_user_contexts)
		aspm_disable_all(dd);
	fd->uctxt = uctxt;

	return 0;

ctxdata_free:
	dd->rcd[ctxt] = NULL;
	hfi1_free_ctxtdata(dd, uctxt);
	return ret;
}
static int init_subctxts(struct hfi1_ctxtdata *uctxt,
			 const struct hfi1_user_info *uinfo)
{
	unsigned num_subctxts;

	num_subctxts = uinfo->subctxt_cnt;
	if (num_subctxts > HFI1_MAX_SHARED_CTXTS)
		return -EINVAL;

	uctxt->subctxt_cnt = uinfo->subctxt_cnt;
	uctxt->subctxt_id = uinfo->subctxt_id;
	uctxt->active_slaves = 1;
	uctxt->redirect_seq_cnt = 1;
	set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);

	return 0;
}
static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
{
	int ret = 0;
	unsigned num_subctxts = uctxt->subctxt_cnt;

	uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
	if (!uctxt->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* We can take the size of the RcvHdr Queue from the master */
	uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
						  num_subctxts);
	if (!uctxt->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
						num_subctxts);
	if (!uctxt->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}
	goto bail;

bail_rhdr:
	vfree(uctxt->subctxt_rcvhdr_base);
bail_ureg:
	vfree(uctxt->subctxt_uregbase);
	uctxt->subctxt_uregbase = NULL;
bail:
	return ret;
}
static int user_init(struct file *fp)
{
	unsigned int rcvctrl_ops = 0;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/* make sure that the context has already been set up */
	if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
		return -EFAULT;

	/* initialize poll variables... */
	uctxt->urgent = 0;
	uctxt->urgent_poll = 0;

	/*
	 * Now enable the ctxt for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1), we turn it off and then back on.
	 * This will (very briefly) affect any other open ctxts, but the
	 * duration is very short, and therefore isn't an issue. We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	if (uctxt->rcvhdrtail_kvaddr)
		clear_rcvhdrtail(uctxt);

	/* Set up J_KEY before enabling the context */
	hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);

	rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
		rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
	/*
	 * Ignore the bit in the flags for now until proper
	 * support for multiple packet per rcv array entry is
	 * added.
	 */
	if (!HFI1_CAP_UGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_UGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	/*
	 * The RcvCtxtCtrl.TailUpd bit has to be explicitly written.
	 * We can't rely on the correct value to be set from prior
	 * uses of the chip or ctxt. Therefore, add the rcvctrl op
	 * for both cases.
	 */
	if (HFI1_CAP_UGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
	else
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_DIS;
	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);

	/* Notify any waiting slaves */
	if (uctxt->subctxt_cnt) {
		clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
		wake_up(&uctxt->wait);
	}

	return 0;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_ctxt_info cinfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	int ret = 0;

	memset(&cinfo, 0, sizeof(cinfo));
	cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
				HFI1_CAP_MISC_MASK) << HFI1_CAP_USER_SHIFT) |
			HFI1_CAP_UGET_MASK(uctxt->flags, MASK) |
			HFI1_CAP_KGET_MASK(uctxt->flags, K2U);
	/* adjust flag if this fd is not able to cache */
	if (!fd->handler)
		cinfo.runtime_flags |= HFI1_CAP_TID_UNMAP; /* no caching */

	cinfo.num_active = hfi1_count_active_units();
	cinfo.unit = uctxt->dd->unit;
	cinfo.ctxt = uctxt->ctxt;
	cinfo.subctxt = fd->subctxt;
	cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
				uctxt->dd->rcv_entries.group_size) +
		uctxt->expected_count;
	cinfo.credits = uctxt->sc->credits;
	cinfo.numa_node = uctxt->numa_id;
	cinfo.rec_cpu = fd->rec_cpu_num;
	cinfo.send_ctxt = uctxt->sc->hw_context;

	cinfo.egrtids = uctxt->egrbufs.alloced;
	cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
	cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
	cinfo.sdma_ring_size = fd->cq->nentries;
	cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

	trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
	if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
		ret = -EFAULT;

	return ret;
}
static int setup_ctxt(struct file *fp)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	/*
	 * Context should be set up only once, including allocation and
	 * programming of eager buffers. This is done if context sharing
	 * is not requested or by the master process.
	 */
	if (!uctxt->subctxt_cnt || !fd->subctxt) {
		ret = hfi1_init_ctxt(uctxt->sc);
		if (ret)
			goto done;

		/* Now allocate the RcvHdr queue and eager buffers. */
		ret = hfi1_create_rcvhdrq(dd, uctxt);
		if (ret)
			goto done;
		ret = hfi1_setup_eagerbufs(uctxt);
		if (ret)
			goto done;
		if (uctxt->subctxt_cnt && !fd->subctxt) {
			ret = setup_subctxt(uctxt);
			if (ret)
				goto done;
		}
	} else {
		ret = wait_event_interruptible(uctxt->wait, !test_bit(
					       HFI1_CTXT_MASTER_UNINIT,
					       &uctxt->event_flags));
		if (ret)
			goto done;
	}

	ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
	if (ret)
		goto done;
	/*
	 * Expected receive has to be set up for all processes (including
	 * shared contexts). However, it has to be done after the master
	 * context has been fully configured as it depends on the
	 * eager/expected split of the RcvArray entries.
	 * Setting it up here ensures that the subcontexts will be waiting
	 * (due to the above wait_event_interruptible()) until the master
	 * context is set up.
	 */
	ret = hfi1_user_exp_rcv_init(fp);
	if (ret)
		goto done;

	set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
	return ret;
}
static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
{
	struct hfi1_base_info binfo;
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	ssize_t sz;
	unsigned offset;
	int ret = 0;

	trace_hfi1_uctxtdata(uctxt->dd, uctxt);

	memset(&binfo, 0, sizeof(binfo));
	binfo.hw_version = dd->revision;
	binfo.sw_version = HFI1_KERN_SWVERSION;
	binfo.bthqp = kdeth_qp;
	binfo.jkey = uctxt->jkey;
	/*
	 * If more than 64 contexts are enabled the allocated credit
	 * return will span two or three contiguous pages. Since we only
	 * map the page containing the context's credit return address,
	 * we need to calculate the offset in the proper page.
	 */
	offset = ((u64)uctxt->sc->hw_free -
		  (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
	binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
						fd->subctxt, offset);
	binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
					    fd->subctxt,
					    uctxt->sc->base_addr);
	binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
						uctxt->ctxt,
						fd->subctxt,
						uctxt->sc->base_addr);
	binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->rcvhdrq);
	binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
					       fd->subctxt,
					       uctxt->egrbufs.rcvtids[0].dma);
	binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
						  fd->subctxt, 0);
	/*
	 * user regs are at
	 * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
	 */
	binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
					     fd->subctxt, 0);
	offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
		    HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
		  sizeof(*dd->events));
	binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
					       fd->subctxt,
					       offset);
	binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
					       fd->subctxt,
					       dd->status);
	if (HFI1_CAP_IS_USET(DMA_RTAIL))
		binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
							fd->subctxt, 0);
	if (uctxt->subctxt_cnt) {
		binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
							 uctxt->ctxt,
							 fd->subctxt, 0);
		binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
							  uctxt->ctxt,
							  fd->subctxt, 0);
		binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
							  uctxt->ctxt,
							  fd->subctxt, 0);
	}
	sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
	if (copy_to_user(ubase, &binfo, sz))
		ret = -EFAULT;

	return ret;
}
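
/*
 * Editor's illustration (hypothetical user-space counterpart, not kernel
 * code): each *_bufbase value filled in above is an opaque token that the
 * process hands back verbatim as the mmap() offset, e.g.:
 *
 *	void *creds = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ,
 *			   MAP_SHARED, fd, binfo.sc_credits_addr);
 */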
static unsigned int poll_urgent(struct file *fp,
				struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (uctxt->urgent != uctxt->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		uctxt->urgent_poll = uctxt->urgent;
	} else {
		pollflag = 0;
		set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
static unsigned int poll_next(struct file *fp,
			      struct poll_table_struct *pt)
{
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned pollflag;

	poll_wait(fp, &uctxt->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (hdrqempty(uctxt)) {
		set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
		hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
		pollflag = 0;
	} else {
		pollflag = POLLIN | POLLRDNORM;
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}
/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
{
	struct hfi1_ctxtdata *uctxt;
	struct hfi1_devdata *dd = ppd->dd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	if (!dd->events) {
		ret = -EINVAL;
		goto done;
	}

	spin_lock_irqsave(&dd->uctxt_lock, flags);
	for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
	     ctxt++) {
		uctxt = dd->rcd[ctxt];
		if (uctxt) {
			unsigned long *evs = dd->events +
				(uctxt->ctxt - dd->first_user_ctxt) *
				HFI1_MAX_SHARED_CTXTS;
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, evs);
			for (i = 1; i < uctxt->subctxt_cnt; i++)
				set_bit(evtbit, evs + i);
		}
	}
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
done:
	return ret;
}
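
/*
 * Editor's sketch (hedged example, not in the original file): a caller
 * reacting to an SPC freeze could flag every user context on the port
 * with a single call, e.g.:
 *
 *	hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
 *
 * The event bit name is assumed here; user_event_ack() below clears the
 * bits once user space has acknowledged them.
 */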
/**
 * manage_rcvq - manage a context's receive queue
 * @uctxt: the context
 * @subctxt: the sub-context
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions. start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
		       int start_stop)
{
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (uctxt->rcvhdrtail_kvaddr)
			clear_rcvhdrtail(uctxt);
		rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
	} else {
		rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
	}
	hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}
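
/*
 * Editor's sketch (hypothetical usage): on receive-queue overflow, the
 * HFI1_IOCTL_RECV_CTRL path above would stop the context, let user space
 * drain the queue, then re-enable it:
 *
 *	manage_rcvq(uctxt, fd->subctxt, 0);	/- stop on overflow -/
 *	/- ...user space consumes entries... -/
 *	manage_rcvq(uctxt, fd->subctxt, 1);	/- re-enable, tail reset to 0 -/
 */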
/*
 * Clear the event notifier events for this context.
 * The user process then performs actions appropriate to the bits having
 * been set, if desired, and checks again in the future.
 */
static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
			  unsigned long events)
{
	int i;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned long *evs;

	if (!dd->events)
		return 0;

	evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
			    HFI1_MAX_SHARED_CTXTS) + subctxt;

	for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		clear_bit(i, evs);
	}
	return 0;
}
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
			 u16 pkey)
{
	int ret = -ENOENT, i, intable = 0;
	struct hfi1_pportdata *ppd = uctxt->ppd;
	struct hfi1_devdata *dd = uctxt->dd;

	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
		ret = -EINVAL;
		goto done;
	}

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
		if (pkey == ppd->pkeys[i]) {
			intable = 1;
			break;
		}

	if (intable)
		ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
	return ret;
}
static void user_remove(struct hfi1_devdata *dd)
{
	hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int user_add(struct hfi1_devdata *dd)
{
	char name[10];
	int ret;

	snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
	ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
			     &dd->user_cdev, &dd->user_device,
			     true, &dd->kobj);
	if (ret)
		user_remove(dd);

	return ret;
}
/*
 * Create per-unit files in /dev
 */
int hfi1_device_create(struct hfi1_devdata *dd)
{
	return user_add(dd);
}

/*
 * Remove per-unit files in /dev
 * void; the core kernel returns no errors for this
 */
void hfi1_device_remove(struct hfi1_devdata *dd)
{
	user_remove(dd);
}