/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <rdma/ib.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);

/*
 * This is really, really weird shit - write() and writev() here
 * have completely unrelated semantics.  Sucky userland ABI,
 * isn't it.
 */
static const struct file_operations qib_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_write,
	.write_iter = qib_write_iter,
	.open = qib_open,
	.release = qib_close,
	.poll = qib_poll,
	.mmap = qib_mmapf,
	.llseek = noop_llseek,
};
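/*
 * Illustrative sketch (not part of the driver): seen from user space, the
 * two write paths above carry unrelated payloads -- write() takes a struct
 * qib_cmd command, while writev()/write_iter feeds packet data to the user
 * SDMA engine.  A minimal caller, assuming an already-opened /dev/ipath
 * style fd, might look like:
 *
 *	struct qib_cmd cmd = { .type = QIB_CMD_PIOAVAILUPD };
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)	// command path
 *		perror("qib command");
 *	// bulk SDMA data instead goes through writev(fd, iov, niov)
 */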
/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
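/*
 * Illustrative sketch (not part of the driver): offsets such as the one
 * produced above come back to user space in struct qib_base_info (for
 * example spi_rcvhdr_base, filled in below) and are passed unchanged to
 * mmap() as the file offset; qib_mmapf() recognizes them and maps the
 * matching buffer.  Assuming "binfo" was returned by QIB_CMD_USER_INIT and
 * "len" is the size of that region:
 *
 *	void *hdrq = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, (off_t)binfo.spi_rcvhdr_base);
 *	if (hdrq == MAP_FAILED)
 *		perror("mmap rcvhdrq");
 */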
98 static int qib_get_base_info(struct file *fp, void __user *ubase,
101 struct qib_ctxtdata *rcd = ctxt_fp(fp);
103 struct qib_base_info *kinfo = NULL;
104 struct qib_devdata *dd = rcd->dd;
105 struct qib_pportdata *ppd = rcd->ppd;
106 unsigned subctxt_cnt;
110 subctxt_cnt = rcd->subctxt_cnt;
117 master = !subctxt_fp(fp);
121 /* If context sharing is not requested, allow the old size structure */
123 sz -= 7 * sizeof(u64);
124 if (ubase_size < sz) {
129 kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
135 ret = dd->f_get_base_info(rcd, kinfo);
139 kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
140 kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
141 kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
142 kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
144 * have to mmap whole thing
146 kinfo->spi_rcv_egrbuftotlen =
147 rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
148 kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
149 kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
150 rcd->rcvegrbuf_chunks;
151 kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
153 kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
	 * for this use, may be cfgctxts summed over all chips that
	 * are configured and present
158 kinfo->spi_nctxts = dd->cfgctxts;
159 /* unit (chip/board) our context is on */
160 kinfo->spi_unit = dd->unit;
161 kinfo->spi_port = ppd->port;
162 /* for now, only a single page */
163 kinfo->spi_tid_maxsize = PAGE_SIZE;
166 * Doing this per context, and based on the skip value, etc. This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
170 * These have to be set to user addresses in the user code via mmap.
171 * These values are used on return to user code for the mmap target
172 * addresses only. For 32 bit, same 44 bit address problem, so use
173 * the physical address, not virtual. Before 2.6.11, using the
174 * page_address() macro worked, but in 2.6.11, even that returns the
175 * full 64 bit address (upper bits all 1's). So far, using the
176 * physical addresses (or chip offsets, for chip mapping) works, but
177 * no doubt some future kernel release will change that, and we'll be
178 * on to yet another method of dealing with this.
179 * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
180 * since the chips with non-zero rhf_offset don't normally
181 * enable tail register updates to host memory, but for testing,
182 * both can be enabled and used.
184 kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
185 kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
186 kinfo->spi_rhf_offset = dd->rhf_offset;
187 kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
188 kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
189 /* setup per-unit (not port) status area for user programs */
190 kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
191 (char *) ppd->statusp -
192 (char *) dd->pioavailregs_dma;
193 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
195 kinfo->spi_piocnt = rcd->piocnt;
196 kinfo->spi_piobufbase = (u64) rcd->piobufs;
197 kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
199 kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
200 (rcd->piocnt % subctxt_cnt);
201 /* Master's PIO buffers are after all the slave's */
202 kinfo->spi_piobufbase = (u64) rcd->piobufs +
204 (rcd->piocnt - kinfo->spi_piocnt);
206 unsigned slave = subctxt_fp(fp) - 1;
208 kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
209 kinfo->spi_piobufbase = (u64) rcd->piobufs +
210 dd->palign * kinfo->spi_piocnt * slave;
214 kinfo->spi_sendbuf_status =
215 cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
216 /* only spi_subctxt_* fields should be set in this block! */
217 kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);
219 kinfo->spi_subctxt_rcvegrbuf =
220 cvt_kvaddr(rcd->subctxt_rcvegrbuf);
221 kinfo->spi_subctxt_rcvhdr_base =
222 cvt_kvaddr(rcd->subctxt_rcvhdr_base);
226 * All user buffers are 2KB buffers. If we ever support
227 * giving 4KB buffers to user processes, this will need some
228 * work. Can't use piobufbase directly, because it has
229 * both 2K and 4K buffer base values.
	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
				dd->palign;
233 kinfo->spi_pioalign = dd->palign;
234 kinfo->spi_qpair = QIB_KD_QP;
236 * user mode PIO buffers are always 2KB, even when 4KB can
237 * be received, and sent via the kernel; this is ibmaxlen
240 kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
241 kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
242 kinfo->spi_ctxt = rcd->ctxt;
243 kinfo->spi_subctxt = subctxt_fp(fp);
244 kinfo->spi_sw_version = QIB_KERN_SWVERSION;
245 kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
246 kinfo->spi_hw_version = dd->revision;
249 kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;
251 sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
252 if (copy_to_user(ubase, kinfo, sz))
260 * qib_tid_update - update a context TID
262 * @fp: the qib device file
263 * @ti: the TID information
265 * The new implementation as of Oct 2004 is that the driver assigns
266 * the tid and returns it to the caller. To reduce search time, we
267 * keep a cursor for each context, walking the shadow tid array to find
268 * one that's not in use.
270 * For now, if we can't allocate the full list, we fail, although
271 * in the long run, we'll allocate as many as we can, and the
272 * caller will deal with that by trying the remaining pages later.
273 * That means that when we fail, we have to mark the tids as not in
274 * use again, in our shadow copy.
276 * It's up to the caller to free the tids when they are done.
277 * We'll unlock the pages as they free them.
279 * Also, right now we are locking one page at a time, but since
280 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
284 static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
285 const struct qib_tid_info *ti)
288 u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
290 struct qib_devdata *dd = rcd->dd;
293 u64 __iomem *tidbase;
294 unsigned long tidmap[8];
295 struct page **pagep = NULL;
296 unsigned subctxt = subctxt_fp(fp);
298 if (!dd->pageshadow) {
308 ctxttid = rcd->ctxt * dd->rcvtidcnt;
309 if (!rcd->subctxt_cnt) {
310 tidcnt = dd->rcvtidcnt;
311 tid = rcd->tidcursor;
313 } else if (!subctxt) {
314 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
315 (dd->rcvtidcnt % rcd->subctxt_cnt);
316 tidoff = dd->rcvtidcnt - tidcnt;
318 tid = tidcursor_fp(fp);
320 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
321 tidoff = tidcnt * (subctxt - 1);
323 tid = tidcursor_fp(fp);
	if (cnt > tidcnt) {
		/* make sure it all fits in tid_pg_list */
		qib_devinfo(dd->pcidev,
			"Process tried to allocate %u TIDs, only trying max (%u)\n",
			cnt, tidcnt);
		cnt = tidcnt;
	}
332 pagep = (struct page **) rcd->tid_pg_list;
333 tidlist = (u16 *) &pagep[dd->rcvtidcnt];
337 memset(tidmap, 0, sizeof(tidmap));
338 /* before decrement; chip actual # */
340 tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
342 ctxttid * sizeof(*tidbase));
344 /* virtual address of first page in transfer */
345 vaddr = ti->tidvaddr;
346 if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
351 ret = qib_get_user_pages(vaddr, cnt, pagep);
355 * We can't continue because the pagep array won't be
356 * initialized. This should never happen,
357 * unless perhaps the user has mpin'ed the pages
362 "Failed to lock addr %p, %u pages: errno %d\n",
363 (void *) vaddr, cnt, -ret);
366 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
369 for (; ntids--; tid++) {
372 if (!dd->pageshadow[ctxttid + tid])
377 * Oops, wrapped all the way through their TIDs,
				 * and didn't have enough free; see comments at
				 * start of routine
				 */
381 i--; /* last tidlist[i] not filled in */
385 ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
389 tidlist[i] = tid + tidoff;
390 /* we "know" system pages and TID pages are same size */
391 dd->pageshadow[ctxttid + tid] = pagep[i];
392 dd->physshadow[ctxttid + tid] = daddr;
394 * don't need atomic or it's overhead
396 __set_bit(tid, tidmap);
397 physaddr = dd->physshadow[ctxttid + tid];
398 /* PERFORMANCE: below should almost certainly be cached */
399 dd->f_put_tid(dd, &tidbase[tid],
400 RCVHQ_RCV_TYPE_EXPECTED, physaddr);
402 * don't check this tid in qib_ctxtshadow, since we
403 * just filled it in; start with the next one.
411 /* jump here if copy out of updated info failed... */
412 /* same code that's in qib_free_tid() */
	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit((const unsigned long *)tidmap, limit);
418 for (; tid < limit; tid++) {
419 if (!test_bit(tid, tidmap))
421 if (dd->pageshadow[ctxttid + tid]) {
424 phys = dd->physshadow[ctxttid + tid];
425 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
426 /* PERFORMANCE: below should almost certainly
429 dd->f_put_tid(dd, &tidbase[tid],
430 RCVHQ_RCV_TYPE_EXPECTED,
432 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
434 dd->pageshadow[ctxttid + tid] = NULL;
437 qib_release_user_pages(pagep, cnt);
440 * Copy the updated array, with qib_tid's filled in, back
441 * to user. Since we did the copy in already, this "should
442 * never fail" If it does, we have to clean up...
444 if (copy_to_user((void __user *)
445 (unsigned long) ti->tidlist,
446 tidlist, cnt * sizeof(*tidlist))) {
450 if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
451 tidmap, sizeof(tidmap))) {
457 if (!rcd->subctxt_cnt)
458 rcd->tidcursor = tid;
460 tidcursor_fp(fp) = tid;
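/*
 * Illustrative sketch (not part of the driver): a user process drives the
 * routine above with QIB_CMD_TID_UPDATE, pointing a struct qib_tid_info at
 * its own buffers.  The field names below (tidcnt/tidvaddr/tidlist/tidmap)
 * are assumed from qib_common.h rather than shown in this file:
 *
 *	__u16 tids[NTIDS];
 *	__u64 map[8];
 *	struct qib_cmd cmd = { .type = QIB_CMD_TID_UPDATE };
 *	cmd.cmd.tid_info.tidvaddr = (__u64)(uintptr_t)buf;
 *	cmd.cmd.tid_info.tidcnt = NTIDS;
 *	cmd.cmd.tid_info.tidlist = (__u64)(uintptr_t)tids;
 *	cmd.cmd.tid_info.tidmap = (__u64)(uintptr_t)map;
 *	write(fd, &cmd, sizeof(cmd));
 */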
468 * qib_tid_free - free a context TID
470 * @subctxt: the subcontext
473 * right now we are unlocking one page at a time, but since
474 * the intended use of this routine is for a single group of
475 * virtually contiguous pages, that should change to improve
476 * performance. We check that the TID is in range for this context
477 * but otherwise don't check validity; if user has an error and
478 * frees the wrong tid, it's only their own data that can thereby
479 * be corrupted. We do check that the TID was in use, for sanity
480 * We always use our idea of the saved address, not the address that
481 * they pass in to us.
483 static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
484 const struct qib_tid_info *ti)
487 u32 tid, ctxttid, cnt, limit, tidcnt;
488 struct qib_devdata *dd = rcd->dd;
489 u64 __iomem *tidbase;
490 unsigned long tidmap[8];
492 if (!dd->pageshadow) {
497 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
503 ctxttid = rcd->ctxt * dd->rcvtidcnt;
504 if (!rcd->subctxt_cnt)
505 tidcnt = dd->rcvtidcnt;
507 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
508 (dd->rcvtidcnt % rcd->subctxt_cnt);
509 ctxttid += dd->rcvtidcnt - tidcnt;
511 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
512 ctxttid += tidcnt * (subctxt - 1);
514 tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
516 ctxttid * sizeof(*tidbase));
	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
523 for (cnt = 0; tid < limit; tid++) {
525 * small optimization; if we detect a run of 3 or so without
526 * any set, use find_first_bit again. That's mainly to
527 * accelerate the case where we wrapped, so we have some at
528 * the beginning, and some at the end, and a big gap
531 if (!test_bit(tid, tidmap))
534 if (dd->pageshadow[ctxttid + tid]) {
538 p = dd->pageshadow[ctxttid + tid];
539 dd->pageshadow[ctxttid + tid] = NULL;
540 phys = dd->physshadow[ctxttid + tid];
541 dd->physshadow[ctxttid + tid] = dd->tidinvalid;
542 /* PERFORMANCE: below should almost certainly be
545 dd->f_put_tid(dd, &tidbase[tid],
546 RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
547 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
549 qib_release_user_pages(&p, 1);
557 * qib_set_part_key - set a partition key
561 * We can have up to 4 active at a time (other than the default, which is
562 * always allowed). This is somewhat tricky, since multiple contexts may set
563 * the same key, so we reference count them, and clean up at exit. All 4
564 * partition keys are packed into a single qlogic_ib register. It's an
565 * error for a process to set the same pkey multiple times. We provide no
566 * mechanism to de-allocate a pkey at this time, we may eventually need to
567 * do that. I've used the atomic operations, and no locking, and only make
568 * a single pass through what's available. This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and
 * as needed.
 */
572 static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
574 struct qib_pportdata *ppd = rcd->ppd;
575 int i, any = 0, pidx = -1;
576 u16 lkey = key & 0x7FFF;
579 if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
580 /* nothing to do; this key always valid */
591 * Set the full membership bit, because it has to be
592 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to do it.
	 */
	key |= 0x8000;
598 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
599 if (!rcd->pkeys[i] && pidx == -1)
601 if (rcd->pkeys[i] == key) {
610 for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
611 if (!ppd->pkeys[i]) {
615 if (ppd->pkeys[i] == key) {
616 atomic_t *pkrefs = &ppd->pkeyrefs[i];
618 if (atomic_inc_return(pkrefs) > 1) {
619 rcd->pkeys[pidx] = key;
624 * lost race, decrement count, catch below
630 if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
632 * It makes no sense to have both the limited and
633 * full membership PKEY set at the same time since
634 * the unlimited one will disable the limited one.
644 for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
645 if (!ppd->pkeys[i] &&
646 atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
647 rcd->pkeys[pidx] = key;
649 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
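/*
 * Illustrative sketch (not part of the driver): a user process installs a
 * partition key through QIB_CMD_SET_PART_KEY.  Bit 15 of the 16-bit pkey is
 * the full-membership bit, which is why the code above compares on
 * (key & 0x7FFF); the low 15 bits identify the partition.  For example, to
 * join partition 0x0001 as a full member:
 *
 *	struct qib_cmd cmd = { .type = QIB_CMD_SET_PART_KEY };
 *	cmd.cmd.part_key = 0x8001;	// 0x0001 would be limited membership
 *	write(fd, &cmd, sizeof(cmd));
 */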
661 * qib_manage_rcvq - manage a context's receive queue
663 * @subctxt: the subcontext
664 * @start_stop: action to carry out
666 * start_stop == 0 disables receive on the context, for use in queue
667 * overflow conditions. start_stop==1 re-enables, to be used to
668 * re-init the software copy of the head register
670 static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
673 struct qib_devdata *dd = rcd->dd;
674 unsigned int rcvctrl_op;
678 /* atomically clear receive enable ctxt. */
681 * On enable, force in-memory copy of the tail register to
682 * 0, so that protocol code doesn't have to worry about
683 * whether or not the chip has yet updated the in-memory
684 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
686 * transition from disabled to enabled.
688 if (rcd->rcvhdrtail_kvaddr)
689 qib_clear_rcvhdrtail(rcd);
690 rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
692 rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
693 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
694 /* always; new head should be equal to new tail; see above */
699 static void qib_clean_part_key(struct qib_ctxtdata *rcd,
700 struct qib_devdata *dd)
702 int i, j, pchanged = 0;
704 struct qib_pportdata *ppd = rcd->ppd;
706 /* for debugging only */
707 oldpkey = (u64) ppd->pkeys[0] |
708 ((u64) ppd->pkeys[1] << 16) |
709 ((u64) ppd->pkeys[2] << 32) |
710 ((u64) ppd->pkeys[3] << 48);
712 for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
715 for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
716 /* check for match independent of the global bit */
717 if ((ppd->pkeys[j] & 0x7fff) !=
718 (rcd->pkeys[i] & 0x7fff))
720 if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
729 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
732 /* common code for the mappings on dma_alloc_coherent mem */
733 static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
734 unsigned len, void *kvaddr, u32 write_ok, char *what)
736 struct qib_devdata *dd = rcd->dd;
740 if ((vma->vm_end - vma->vm_start) > len) {
741 qib_devinfo(dd->pcidev,
742 "FAIL on %s: len %lx > %x\n", what,
743 vma->vm_end - vma->vm_start, len);
749 * shared context user code requires rcvhdrq mapped r/w, others
750 * only allowed readonly mapping.
753 if (vma->vm_flags & VM_WRITE) {
754 qib_devinfo(dd->pcidev,
755 "%s must be mapped readonly\n", what);
760 /* don't allow them to later change with mprotect */
761 vma->vm_flags &= ~VM_MAYWRITE;
764 pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
765 ret = remap_pfn_range(vma, vma->vm_start, pfn,
766 len, vma->vm_page_prot);
768 qib_devinfo(dd->pcidev,
769 "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
770 what, rcd->ctxt, pfn, len, ret);
775 static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
783 * This is real hardware, so use io_remap. This is the mechanism
784 * for the user process to update the head registers for their ctxt
787 sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
788 if ((vma->vm_end - vma->vm_start) > sz) {
789 qib_devinfo(dd->pcidev,
790 "FAIL mmap userreg: reqlen %lx > PAGE\n",
791 vma->vm_end - vma->vm_start);
794 phys = dd->physaddr + ureg;
795 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
797 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
798 ret = io_remap_pfn_range(vma, vma->vm_start,
800 vma->vm_end - vma->vm_start,
806 static int mmap_piobufs(struct vm_area_struct *vma,
807 struct qib_devdata *dd,
808 struct qib_ctxtdata *rcd,
809 unsigned piobufs, unsigned piocnt)
815 * When we map the PIO buffers in the chip, we want to map them as
816 * writeonly, no read possible; unfortunately, x86 doesn't allow
	 * for this in hardware, but we still prevent users from asking
	 * for it.
	 */
820 if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
821 qib_devinfo(dd->pcidev,
822 "FAIL mmap piobufs: reqlen %lx > PAGE\n",
823 vma->vm_end - vma->vm_start);
828 phys = dd->physaddr + piobufs;
830 #if defined(__powerpc__)
831 /* There isn't a generic way to specify writethrough mappings */
832 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
833 pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
834 pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
838 * don't allow them to later change to readable with mprotect (for when
839 * not initially mapped readable, as is normally the case)
841 vma->vm_flags &= ~VM_MAYREAD;
842 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
	/* We used PAT if wc_cookie == 0 */
	if (!dd->wc_cookie)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
848 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
849 vma->vm_end - vma->vm_start,
855 static int mmap_rcvegrbufs(struct vm_area_struct *vma,
856 struct qib_ctxtdata *rcd)
858 struct qib_devdata *dd = rcd->dd;
859 unsigned long start, size;
860 size_t total_size, i;
864 size = rcd->rcvegrbuf_size;
865 total_size = rcd->rcvegrbuf_chunks * size;
866 if ((vma->vm_end - vma->vm_start) > total_size) {
867 qib_devinfo(dd->pcidev,
868 "FAIL on egr bufs: reqlen %lx > actual %lx\n",
869 vma->vm_end - vma->vm_start,
870 (unsigned long) total_size);
875 if (vma->vm_flags & VM_WRITE) {
876 qib_devinfo(dd->pcidev,
877 "Can't map eager buffers as writable (flags=%lx)\n",
882 /* don't allow them to later change to writeable with mprotect */
883 vma->vm_flags &= ~VM_MAYWRITE;
885 start = vma->vm_start;
887 for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
888 pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
889 ret = remap_pfn_range(vma, start, pfn, size,
901 * qib_file_vma_fault - handle a VMA page fault.
903 static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
907 page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
909 return VM_FAULT_SIGBUS;
917 static const struct vm_operations_struct qib_file_vm_ops = {
918 .fault = qib_file_vma_fault,
921 static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
922 struct qib_ctxtdata *rcd, unsigned subctxt)
924 struct qib_devdata *dd = rcd->dd;
925 unsigned subctxt_cnt;
931 subctxt_cnt = rcd->subctxt_cnt;
932 size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
935 * Each process has all the subctxt uregbase, rcvhdrq, and
936 * rcvegrbufs mmapped - as an array for all the processes,
937 * and also separately for this process.
939 if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
940 addr = rcd->subctxt_uregbase;
941 size = PAGE_SIZE * subctxt_cnt;
942 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
943 addr = rcd->subctxt_rcvhdr_base;
944 size = rcd->rcvhdrq_size * subctxt_cnt;
945 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
946 addr = rcd->subctxt_rcvegrbuf;
948 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
949 PAGE_SIZE * subctxt)) {
950 addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
952 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
953 rcd->rcvhdrq_size * subctxt)) {
954 addr = rcd->subctxt_rcvhdr_base +
955 rcd->rcvhdrq_size * subctxt;
956 size = rcd->rcvhdrq_size;
957 } else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
958 addr = rcd->user_event_mask;
960 } else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
962 addr = rcd->subctxt_rcvegrbuf + size * subctxt;
963 /* rcvegrbufs are read-only on the slave */
964 if (vma->vm_flags & VM_WRITE) {
965 qib_devinfo(dd->pcidev,
966 "Can't map eager buffers as writable (flags=%lx)\n",
972 * Don't allow permission to later change to writeable
975 vma->vm_flags &= ~VM_MAYWRITE;
978 len = vma->vm_end - vma->vm_start;
984 vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
985 vma->vm_ops = &qib_file_vm_ops;
986 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
994 * qib_mmapf - mmap various structures into user space
995 * @fp: the file pointer
998 * We use this to have a shared buffer between the kernel and the user code
999 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
1000 * buffers in the chip. We have the open and close entries so we can bump
1001 * the ref count and keep the driver from being unloaded while still mapped.
1003 static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
1005 struct qib_ctxtdata *rcd;
1006 struct qib_devdata *dd;
1008 unsigned piobufs, piocnt;
1012 if (!rcd || !(vma->vm_flags & VM_SHARED)) {
1019 * This is the qib_do_user_init() code, mapping the shared buffers
1020 * and per-context user registers into the user process. The address
1021 * referred to by vm_pgoff is the file offset passed via mmap().
1022 * For shared contexts, this is the kernel vmalloc() address of the
1023 * pages to share with the master.
1024 * For non-shared or master ctxts, this is a physical address.
1025 * We only do one mmap for each space mapped.
1027 pgaddr = vma->vm_pgoff << PAGE_SHIFT;
1030 * Check for 0 in case one of the allocations failed, but user
1031 * called mmap anyway.
1039 * Physical addresses must fit in 40 bits for our hardware.
1040 * Check for kernel virtual addresses first, anything else must
1041 * match a HW or memory address.
1043 ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
1050 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
1051 if (!rcd->subctxt_cnt) {
1052 /* ctxt is not shared */
1053 piocnt = rcd->piocnt;
1054 piobufs = rcd->piobufs;
1055 } else if (!subctxt_fp(fp)) {
1056 /* caller is the master */
1057 piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
1058 (rcd->piocnt % rcd->subctxt_cnt);
1059 piobufs = rcd->piobufs +
1060 dd->palign * (rcd->piocnt - piocnt);
1062 unsigned slave = subctxt_fp(fp) - 1;
1064 /* caller is a slave */
1065 piocnt = rcd->piocnt / rcd->subctxt_cnt;
1066 piobufs = rcd->piobufs + dd->palign * piocnt * slave;
1070 ret = mmap_ureg(vma, dd, ureg);
1071 else if (pgaddr == piobufs)
1072 ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
1073 else if (pgaddr == dd->pioavailregs_phys)
1074 /* in-memory copy of pioavail registers */
1075 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1076 (void *) dd->pioavailregs_dma, 0,
1077 "pioavail registers");
1078 else if (pgaddr == rcd->rcvegr_phys)
1079 ret = mmap_rcvegrbufs(vma, rcd);
1080 else if (pgaddr == (u64) rcd->rcvhdrq_phys)
1082 * The rcvhdrq itself; multiple pages, contiguous
1083 * from an i/o perspective. Shared contexts need
1084 * to map r/w, so we allow writing.
1086 ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
1087 rcd->rcvhdrq, 1, "rcvhdrq");
1088 else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
1089 /* in-memory copy of rcvhdrq tail register */
1090 ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
1091 rcd->rcvhdrtail_kvaddr, 0,
1098 vma->vm_private_data = NULL;
1101 qib_devinfo(dd->pcidev,
1102 "mmap Failure %d: off %llx len %lx\n",
1103 -ret, (unsigned long long)pgaddr,
1104 vma->vm_end - vma->vm_start);
1109 static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
1111 struct poll_table_struct *pt)
1113 struct qib_devdata *dd = rcd->dd;
1116 poll_wait(fp, &rcd->wait, pt);
1118 spin_lock_irq(&dd->uctxt_lock);
1119 if (rcd->urgent != rcd->urgent_poll) {
1120 pollflag = POLLIN | POLLRDNORM;
1121 rcd->urgent_poll = rcd->urgent;
1124 set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
1126 spin_unlock_irq(&dd->uctxt_lock);
1131 static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
1133 struct poll_table_struct *pt)
1135 struct qib_devdata *dd = rcd->dd;
1138 poll_wait(fp, &rcd->wait, pt);
1140 spin_lock_irq(&dd->uctxt_lock);
1141 if (dd->f_hdrqempty(rcd)) {
1142 set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
1143 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
1146 pollflag = POLLIN | POLLRDNORM;
1147 spin_unlock_irq(&dd->uctxt_lock);
1152 static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
1154 struct qib_ctxtdata *rcd;
1160 else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
1161 pollflag = qib_poll_urgent(rcd, fp, pt);
1162 else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
1163 pollflag = qib_poll_next(rcd, fp, pt);
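/*
 * Illustrative sketch (not part of the driver): user space selects a poll
 * type with QIB_CMD_POLL_TYPE and then poll()s the fd; POLLIN | POLLRDNORM
 * is reported when an urgent packet (QIB_POLL_TYPE_URGENT) or any received
 * packet (QIB_POLL_TYPE_ANYRCV) is pending:
 *
 *	struct qib_cmd cmd = { .type = QIB_CMD_POLL_TYPE,
 *			       .cmd.poll_type = QIB_POLL_TYPE_ANYRCV };
 *	write(fd, &cmd, sizeof(cmd));
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, timeout_ms);
 */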
1170 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
1172 struct qib_filedata *fd = fp->private_data;
	const unsigned int weight = cpumask_weight(&current->cpus_allowed);
1174 const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it on the local NUMA node.
1181 if ((weight >= qib_cpulist_count) &&
1182 (cpumask_weight(local_mask) <= qib_cpulist_count)) {
1183 for_each_cpu(local_cpu, local_mask)
1184 if (!test_and_set_bit(local_cpu, qib_cpulist)) {
1185 fd->rec_cpu_num = local_cpu;
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it, as a rendezvous for all
	 * users of the driver.  If they don't actually later
	 * set affinity to this cpu, or set it to some other cpu,
	 * it just means that sooner or later we don't recommend
	 * a cpu, and let the scheduler do its best.
1198 if (weight >= qib_cpulist_count) {
1201 cpu = find_first_zero_bit(qib_cpulist,
1203 if (cpu == qib_cpulist_count)
1205 "no cpus avail for affinity PID %u\n",
1208 __set_bit(cpu, qib_cpulist);
1209 fd->rec_cpu_num = cpu;
1215 * Check that userland and driver are compatible for subcontexts.
1217 static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
1219 /* this code is written long-hand for clarity */
1220 if (QIB_USER_SWMAJOR != user_swmajor) {
1221 /* no promise of compatibility if major mismatch */
1224 if (QIB_USER_SWMAJOR == 1) {
1225 switch (QIB_USER_SWMINOR) {
1229 /* no subctxt implementation so cannot be compatible */
1232 /* 3 is only compatible with itself */
1233 return user_swminor == 3;
1235 /* >= 4 are compatible (or are expected to be) */
1236 return user_swminor <= QIB_USER_SWMINOR;
1239 /* make no promises yet for future major versions */
1243 static int init_subctxts(struct qib_devdata *dd,
1244 struct qib_ctxtdata *rcd,
1245 const struct qib_user_info *uinfo)
1248 unsigned num_subctxts;
1252 * If the user is requesting zero subctxts,
1253 * skip the subctxt allocation.
1255 if (uinfo->spu_subctxt_cnt <= 0)
1257 num_subctxts = uinfo->spu_subctxt_cnt;
1259 /* Check for subctxt compatibility */
1260 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
1261 uinfo->spu_userversion & 0xffff)) {
1262 qib_devinfo(dd->pcidev,
1263 "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
1264 (int) (uinfo->spu_userversion >> 16),
1265 (int) (uinfo->spu_userversion & 0xffff),
1266 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
1269 if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
1274 rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
1275 if (!rcd->subctxt_uregbase) {
1279 /* Note: rcd->rcvhdrq_size isn't initialized yet. */
1280 size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
1281 sizeof(u32), PAGE_SIZE) * num_subctxts;
1282 rcd->subctxt_rcvhdr_base = vmalloc_user(size);
1283 if (!rcd->subctxt_rcvhdr_base) {
1288 rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
1289 rcd->rcvegrbuf_size *
1291 if (!rcd->subctxt_rcvegrbuf) {
1296 rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
1297 rcd->subctxt_id = uinfo->spu_subctxt_id;
1298 rcd->active_slaves = 1;
1299 rcd->redirect_seq_cnt = 1;
1300 set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1304 vfree(rcd->subctxt_rcvhdr_base);
1306 vfree(rcd->subctxt_uregbase);
1307 rcd->subctxt_uregbase = NULL;
1312 static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
1313 struct file *fp, const struct qib_user_info *uinfo)
1315 struct qib_filedata *fd = fp->private_data;
1316 struct qib_devdata *dd = ppd->dd;
1317 struct qib_ctxtdata *rcd;
1322 assign_ctxt_affinity(fp, dd);
1324 numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
1325 cpu_to_node(fd->rec_cpu_num) :
1326 numa_node_id()) : dd->assigned_node_id;
1328 rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);
1331 * Allocate memory for use in qib_tid_update() at open to
1332 * reduce cost of expected send setup per message segment
1335 ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
1336 dd->rcvtidcnt * sizeof(struct page **),
1339 if (!rcd || !ptmp) {
1341 "Unable to allocate ctxtdata memory, failing open\n");
1345 rcd->userversion = uinfo->spu_userversion;
1346 ret = init_subctxts(dd, rcd, uinfo);
1349 rcd->tid_pg_list = ptmp;
1350 rcd->pid = current->pid;
1351 init_waitqueue_head(&dd->rcd[ctxt]->wait);
1352 strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
1354 qib_stats.sps_ctxts++;
1360 if (fd->rec_cpu_num != -1)
1361 __clear_bit(fd->rec_cpu_num, qib_cpulist);
1363 dd->rcd[ctxt] = NULL;
1370 static inline int usable(struct qib_pportdata *ppd)
1372 struct qib_devdata *dd = ppd->dd;
1374 return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
1375 (ppd->lflags & QIBL_LINKACTIVE);
1379 * Select a context on the given device, either using a requested port
1380 * or the port based on the context number.
1382 static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
1383 const struct qib_user_info *uinfo)
1385 struct qib_pportdata *ppd = NULL;
1389 if (!usable(dd->pport + port - 1)) {
1393 ppd = dd->pport + port - 1;
1395 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
1398 if (ctxt == dd->cfgctxts) {
1403 u32 pidx = ctxt % dd->num_pports;
1405 if (usable(dd->pport + pidx))
1406 ppd = dd->pport + pidx;
1408 for (pidx = 0; pidx < dd->num_pports && !ppd;
1410 if (usable(dd->pport + pidx))
1411 ppd = dd->pport + pidx;
1414 ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
1419 static int find_free_ctxt(int unit, struct file *fp,
1420 const struct qib_user_info *uinfo)
1422 struct qib_devdata *dd = qib_lookup(unit);
1425 if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
1428 ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);
1433 static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
1436 struct qib_devdata *udd = NULL;
1437 int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
1438 u32 port = uinfo->spu_port, ctxt;
1440 devmax = qib_count_units(&npresent, &nup);
1450 if (alg == QIB_PORT_ALG_ACROSS) {
1451 unsigned inuse = ~0U;
1453 /* find device (with ACTIVE ports) with fewest ctxts in use */
1454 for (ndev = 0; ndev < devmax; ndev++) {
1455 struct qib_devdata *dd = qib_lookup(ndev);
1456 unsigned cused = 0, cfree = 0, pusable = 0;
1460 if (port && port <= dd->num_pports &&
1461 usable(dd->pport + port - 1))
1464 for (i = 0; i < dd->num_pports; i++)
1465 if (usable(dd->pport + i))
1469 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
1475 if (cfree && cused < inuse) {
1481 ret = choose_port_ctxt(fp, udd, port, uinfo);
1485 for (ndev = 0; ndev < devmax; ndev++) {
1486 struct qib_devdata *dd = qib_lookup(ndev);
1489 ret = choose_port_ctxt(fp, dd, port, uinfo);
1497 ret = dusable ? -EBUSY : -ENETDOWN;
1503 static int find_shared_ctxt(struct file *fp,
1504 const struct qib_user_info *uinfo)
1506 int devmax, ndev, i;
1509 devmax = qib_count_units(NULL, NULL);
1511 for (ndev = 0; ndev < devmax; ndev++) {
1512 struct qib_devdata *dd = qib_lookup(ndev);
1514 /* device portion of usable() */
1515 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
1517 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
1518 struct qib_ctxtdata *rcd = dd->rcd[i];
1520 /* Skip ctxts which are not yet open */
1521 if (!rcd || !rcd->cnt)
1523 /* Skip ctxt if it doesn't match the requested one */
1524 if (rcd->subctxt_id != uinfo->spu_subctxt_id)
1526 /* Verify the sharing process matches the master */
1527 if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
1528 rcd->userversion != uinfo->spu_userversion ||
1529 rcd->cnt >= rcd->subctxt_cnt) {
1534 subctxt_fp(fp) = rcd->cnt++;
1535 rcd->subpid[subctxt_fp(fp)] = current->pid;
1536 tidcursor_fp(fp) = 0;
1537 rcd->active_slaves |= 1 << subctxt_fp(fp);
1547 static int qib_open(struct inode *in, struct file *fp)
1549 /* The real work is performed later in qib_assign_ctxt() */
1550 fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
1551 if (fp->private_data) /* no cpu affinity by default */
1552 ((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
1553 return fp->private_data ? 0 : -ENOMEM;
1556 static int find_hca(unsigned int cpu, int *unit)
1558 int ret = 0, devmax, npresent, nup, ndev;
1562 devmax = qib_count_units(&npresent, &nup);
1571 for (ndev = 0; ndev < devmax; ndev++) {
1572 struct qib_devdata *dd = qib_lookup(ndev);
1575 if (pcibus_to_node(dd->pcidev->bus) < 0) {
1579 if (cpu_to_node(cpu) ==
1580 pcibus_to_node(dd->pcidev->bus)) {
1590 static int do_qib_user_sdma_queue_create(struct file *fp)
1592 struct qib_filedata *fd = fp->private_data;
1593 struct qib_ctxtdata *rcd = fd->rcd;
1594 struct qib_devdata *dd = rcd->dd;
1596 if (dd->flags & QIB_HAS_SEND_DMA) {
1598 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
1610 * Get ctxt early, so can set affinity prior to memory allocation.
1612 static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
1616 unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;
1618 /* Check to be sure we haven't already initialized this file */
1624 /* for now, if major version is different, bail */
1625 swmajor = uinfo->spu_userversion >> 16;
1626 if (swmajor != QIB_USER_SWMAJOR) {
1631 swminor = uinfo->spu_userversion & 0xffff;
1633 if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
1634 alg = uinfo->spu_port_alg;
1636 mutex_lock(&qib_mutex);
1638 if (qib_compatible_subctxts(swmajor, swminor) &&
1639 uinfo->spu_subctxt_cnt) {
1640 ret = find_shared_ctxt(fp, uinfo);
1642 ret = do_qib_user_sdma_queue_create(fp);
1644 assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
1649 i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
1651 ret = find_free_ctxt(i_minor - 1, fp, uinfo);
		const unsigned int cpu = cpumask_first(&current->cpus_allowed);
		const unsigned int weight =
			cpumask_weight(&current->cpus_allowed);
1658 if (weight == 1 && !test_bit(cpu, qib_cpulist))
1659 if (!find_hca(cpu, &unit) && unit >= 0)
1660 if (!find_free_ctxt(unit, fp, uinfo)) {
1664 ret = get_a_ctxt(fp, uinfo, alg);
1669 ret = do_qib_user_sdma_queue_create(fp);
1671 mutex_unlock(&qib_mutex);
1678 static int qib_do_user_init(struct file *fp,
1679 const struct qib_user_info *uinfo)
1682 struct qib_ctxtdata *rcd = ctxt_fp(fp);
1683 struct qib_devdata *dd;
1686 /* Subctxts don't need to initialize anything since master did it. */
1687 if (subctxt_fp(fp)) {
1688 ret = wait_event_interruptible(rcd->wait,
1689 !test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
1695 /* some ctxts may get extra buffers, calculate that here */
1696 uctxt = rcd->ctxt - dd->first_user_ctxt;
1697 if (uctxt < dd->ctxts_extrabuf) {
1698 rcd->piocnt = dd->pbufsctxt + 1;
1699 rcd->pio_base = rcd->piocnt * uctxt;
1701 rcd->piocnt = dd->pbufsctxt;
1702 rcd->pio_base = rcd->piocnt * uctxt +
1707 * All user buffers are 2KB buffers. If we ever support
1708 * giving 4KB buffers to user processes, this will need some
1709 * work. Can't use piobufbase directly, because it has
1710 * both 2K and 4K buffer base values. So check and handle.
1712 if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
1713 if (rcd->pio_base >= dd->piobcnt2k) {
1715 "%u:ctxt%u: no 2KB buffers available\n",
1716 dd->unit, rcd->ctxt);
1720 rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
1721 qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
1722 rcd->ctxt, rcd->piocnt);
1725 rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
1726 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1727 TXCHK_CHG_TYPE_USER, rcd);
1729 * try to ensure that processes start up with consistent avail update
1730 * for their own range, at least. If system very quiet, it might
1731 * have the in-memory copy out of date at startup for this range of
1732 * buffers, when a context gets re-used. Do after the chg_pioavail
1733 * and before the rest of setup, so it's "almost certain" the dma
1734 * will have occurred (can't 100% guarantee, but should be many
1735 * decimals of 9s, with this ordering), given how much else happens
1738 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
1741 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
1742 * array for time being. If rcd->ctxt > chip-supported,
1743 * we need to do extra stuff here to handle by handling overflow
1744 * through ctxt 0, someday
1746 ret = qib_create_rcvhdrq(dd, rcd);
1748 ret = qib_setup_eagerbufs(rcd);
1752 rcd->tidcursor = 0; /* start at beginning after open */
1754 /* initialize poll variables... */
1756 rcd->urgent_poll = 0;
1759 * Now enable the ctxt for receive.
1760 * For chips that are set to DMA the tail register to memory
1761 * when they change (and when the update bit transitions from
1762 * 0 to 1. So for those chips, we turn it off and then back on.
1763 * This will (very briefly) affect any other open ctxts, but the
1764 * duration is very short, and therefore isn't an issue. We
1765 * explicitly set the in-memory tail copy to 0 beforehand, so we
1766 * don't have to wait to be sure the DMA update has happened
1767 * (chip resets head/tail to 0 on transition to enable).
1769 if (rcd->rcvhdrtail_kvaddr)
1770 qib_clear_rcvhdrtail(rcd);
1772 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1775 /* Notify any waiting slaves */
1776 if (rcd->subctxt_cnt) {
1777 clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1778 wake_up(&rcd->wait);
1783 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1784 TXCHK_CHG_TYPE_KERN, rcd);
 * unlock_expected_tids - unlock any expected TID entries context still had
 * in use
1793 * We don't actually update the chip here, because we do a bulk update
1794 * below, using f_clear_tids.
1796 static void unlock_expected_tids(struct qib_ctxtdata *rcd)
1798 struct qib_devdata *dd = rcd->dd;
1799 int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
1800 int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt;
1802 for (i = ctxt_tidbase; i < maxtid; i++) {
1803 struct page *p = dd->pageshadow[i];
1809 phys = dd->physshadow[i];
1810 dd->physshadow[i] = dd->tidinvalid;
1811 dd->pageshadow[i] = NULL;
1812 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
1813 PCI_DMA_FROMDEVICE);
1814 qib_release_user_pages(&p, 1);
1819 static int qib_close(struct inode *in, struct file *fp)
1822 struct qib_filedata *fd;
1823 struct qib_ctxtdata *rcd;
1824 struct qib_devdata *dd;
1825 unsigned long flags;
1829 mutex_lock(&qib_mutex);
1831 fd = fp->private_data;
1832 fp->private_data = NULL;
1835 mutex_unlock(&qib_mutex);
1841 /* ensure all pio buffer writes in progress are flushed */
1844 /* drain user sdma queue */
1846 qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
1847 qib_user_sdma_queue_destroy(fd->pq);
1850 if (fd->rec_cpu_num != -1)
1851 __clear_bit(fd->rec_cpu_num, qib_cpulist);
1855 * XXX If the master closes the context before the slave(s),
1856 * revoke the mmap for the eager receive queue so
1857 * the slave(s) don't wait for receive data forever.
1859 rcd->active_slaves &= ~(1 << fd->subctxt);
1860 rcd->subpid[fd->subctxt] = 0;
1861 mutex_unlock(&qib_mutex);
1865 /* early; no interrupt users after this */
1866 spin_lock_irqsave(&dd->uctxt_lock, flags);
1868 dd->rcd[ctxt] = NULL;
1871 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1873 if (rcd->rcvwait_to || rcd->piowait_to ||
1874 rcd->rcvnowait || rcd->pionowait) {
1875 rcd->rcvwait_to = 0;
1876 rcd->piowait_to = 0;
1884 /* atomically clear receive enable ctxt and intr avail. */
1885 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
1886 QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);
1888 /* clean up the pkeys for this ctxt user */
1889 qib_clean_part_key(rcd, dd);
1890 qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
1891 qib_chg_pioavailkernel(dd, rcd->pio_base,
1892 rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);
1894 dd->f_clear_tids(dd, rcd);
1897 unlock_expected_tids(rcd);
1898 qib_stats.sps_ctxts--;
1902 mutex_unlock(&qib_mutex);
1903 qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */
1910 static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
1912 struct qib_ctxt_info info;
1915 struct qib_ctxtdata *rcd = ctxt_fp(fp);
1916 struct qib_filedata *fd;
1918 fd = fp->private_data;
1920 info.num_active = qib_count_active_units();
1921 info.unit = rcd->dd->unit;
1922 info.port = rcd->ppd->port;
1923 info.ctxt = rcd->ctxt;
1924 info.subctxt = subctxt_fp(fp);
1925 /* Number of user ctxts available for this device. */
1926 info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
1927 info.num_subctxts = rcd->subctxt_cnt;
1928 info.rec_cpu = fd->rec_cpu_num;
1931 if (copy_to_user(uinfo, &info, sz)) {
1941 static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
1942 u32 __user *inflightp)
1944 const u32 val = qib_user_sdma_inflight_counter(pq);
1946 if (put_user(val, inflightp))
1952 static int qib_sdma_get_complete(struct qib_pportdata *ppd,
1953 struct qib_user_sdma_queue *pq,
1954 u32 __user *completep)
1962 err = qib_user_sdma_make_progress(ppd, pq);
1966 val = qib_user_sdma_complete_counter(pq);
1967 if (put_user(val, completep))
1973 static int disarm_req_delay(struct qib_ctxtdata *rcd)
1977 if (!usable(rcd->ppd)) {
1980 * if link is down, or otherwise not usable, delay
1981 * the caller up to 30 seconds, so we don't thrash
1982 * in trying to get the chip back to ACTIVE, and
1983 * set flag so they make the call again.
1985 if (rcd->user_event_mask) {
1987 * subctxt_cnt is 0 if not shared, so do base
1988 * separately, first, then remaining subctxt, if any
1990 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1991 &rcd->user_event_mask[0]);
1992 for (i = 1; i < rcd->subctxt_cnt; i++)
1993 set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
1994 &rcd->user_event_mask[i]);
1996 for (i = 0; !usable(rcd->ppd) && i < 300; i++)
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
2006 * See also find_ctxt() for a similar use, that is specific to send buffers.
2008 int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
2010 struct qib_ctxtdata *rcd;
2013 unsigned long flags;
2015 spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
2016 for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
2018 rcd = ppd->dd->rcd[ctxt];
2021 if (rcd->user_event_mask) {
2024 * subctxt_cnt is 0 if not shared, so do base
2025 * separately, first, then remaining subctxt, if any
2027 set_bit(evtbit, &rcd->user_event_mask[0]);
2028 for (i = 1; i < rcd->subctxt_cnt; i++)
2029 set_bit(evtbit, &rcd->user_event_mask[i]);
2034 spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
2040 * clear the event notifier events for this context.
2041 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
2044 * Other bits don't currently require actions, just atomically clear.
2045 * User process then performs actions appropriate to bit having been
2046 * set, if desired, and checks again in future.
2048 static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
2049 unsigned long events)
2053 for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
2054 if (!test_bit(i, &events))
2056 if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
2057 (void)qib_disarm_piobufs_ifneeded(rcd);
2058 ret = disarm_req_delay(rcd);
2060 clear_bit(i, &rcd->user_event_mask[subctxt]);
2065 static ssize_t qib_write(struct file *fp, const char __user *data,
2066 size_t count, loff_t *off)
2068 const struct qib_cmd __user *ucmd;
2069 struct qib_ctxtdata *rcd;
2070 const void __user *src;
2071 size_t consumed, copy = 0;
2076 if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
2079 if (count < sizeof(cmd.type)) {
2084 ucmd = (const struct qib_cmd __user *) data;
2086 if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
2091 consumed = sizeof(cmd.type);
2094 case QIB_CMD_ASSIGN_CTXT:
2095 case QIB_CMD_USER_INIT:
2096 copy = sizeof(cmd.cmd.user_info);
2097 dest = &cmd.cmd.user_info;
2098 src = &ucmd->cmd.user_info;
2101 case QIB_CMD_RECV_CTRL:
2102 copy = sizeof(cmd.cmd.recv_ctrl);
2103 dest = &cmd.cmd.recv_ctrl;
2104 src = &ucmd->cmd.recv_ctrl;
2107 case QIB_CMD_CTXT_INFO:
2108 copy = sizeof(cmd.cmd.ctxt_info);
2109 dest = &cmd.cmd.ctxt_info;
2110 src = &ucmd->cmd.ctxt_info;
2113 case QIB_CMD_TID_UPDATE:
2114 case QIB_CMD_TID_FREE:
2115 copy = sizeof(cmd.cmd.tid_info);
2116 dest = &cmd.cmd.tid_info;
2117 src = &ucmd->cmd.tid_info;
2120 case QIB_CMD_SET_PART_KEY:
2121 copy = sizeof(cmd.cmd.part_key);
2122 dest = &cmd.cmd.part_key;
2123 src = &ucmd->cmd.part_key;
2126 case QIB_CMD_DISARM_BUFS:
2127 case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
2133 case QIB_CMD_POLL_TYPE:
2134 copy = sizeof(cmd.cmd.poll_type);
2135 dest = &cmd.cmd.poll_type;
2136 src = &ucmd->cmd.poll_type;
2139 case QIB_CMD_ARMLAUNCH_CTRL:
2140 copy = sizeof(cmd.cmd.armlaunch_ctrl);
2141 dest = &cmd.cmd.armlaunch_ctrl;
2142 src = &ucmd->cmd.armlaunch_ctrl;
2145 case QIB_CMD_SDMA_INFLIGHT:
2146 copy = sizeof(cmd.cmd.sdma_inflight);
2147 dest = &cmd.cmd.sdma_inflight;
2148 src = &ucmd->cmd.sdma_inflight;
2151 case QIB_CMD_SDMA_COMPLETE:
2152 copy = sizeof(cmd.cmd.sdma_complete);
2153 dest = &cmd.cmd.sdma_complete;
2154 src = &ucmd->cmd.sdma_complete;
2157 case QIB_CMD_ACK_EVENT:
2158 copy = sizeof(cmd.cmd.event_mask);
2159 dest = &cmd.cmd.event_mask;
2160 src = &ucmd->cmd.event_mask;
2169 if ((count - consumed) < copy) {
2173 if (copy_from_user(dest, src, copy)) {
2181 if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
2187 case QIB_CMD_ASSIGN_CTXT:
2188 ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
2193 case QIB_CMD_USER_INIT:
2194 ret = qib_do_user_init(fp, &cmd.cmd.user_info);
2197 ret = qib_get_base_info(fp, (void __user *) (unsigned long)
2198 cmd.cmd.user_info.spu_base_info,
2199 cmd.cmd.user_info.spu_base_info_size);
2202 case QIB_CMD_RECV_CTRL:
2203 ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
2206 case QIB_CMD_CTXT_INFO:
2207 ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
2208 (unsigned long) cmd.cmd.ctxt_info);
2211 case QIB_CMD_TID_UPDATE:
2212 ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
2215 case QIB_CMD_TID_FREE:
2216 ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
2219 case QIB_CMD_SET_PART_KEY:
2220 ret = qib_set_part_key(rcd, cmd.cmd.part_key);
2223 case QIB_CMD_DISARM_BUFS:
2224 (void)qib_disarm_piobufs_ifneeded(rcd);
2225 ret = disarm_req_delay(rcd);
2228 case QIB_CMD_PIOAVAILUPD:
2229 qib_force_pio_avail_update(rcd->dd);
2232 case QIB_CMD_POLL_TYPE:
2233 rcd->poll_type = cmd.cmd.poll_type;
2236 case QIB_CMD_ARMLAUNCH_CTRL:
2237 rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
2240 case QIB_CMD_SDMA_INFLIGHT:
2241 ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
2242 (u32 __user *) (unsigned long)
2243 cmd.cmd.sdma_inflight);
2246 case QIB_CMD_SDMA_COMPLETE:
2247 ret = qib_sdma_get_complete(rcd->ppd,
2248 user_sdma_queue_fp(fp),
2249 (u32 __user *) (unsigned long)
2250 cmd.cmd.sdma_complete);
2253 case QIB_CMD_ACK_EVENT:
2254 ret = qib_user_event_ack(rcd, subctxt_fp(fp),
2255 cmd.cmd.event_mask);
2266 static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
2268 struct qib_filedata *fp = iocb->ki_filp->private_data;
2269 struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
2270 struct qib_user_sdma_queue *pq = fp->pq;
2272 if (!iter_is_iovec(from) || !from->nr_segs || !pq)
2275 return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
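/*
 * Illustrative sketch (not part of the driver): tying the two entry points
 * together from user space -- commands are single write()s of a struct
 * qib_cmd dispatched by qib_write() above, while bulk sends go through
 * writev(), which lands in qib_write_iter() and the user SDMA queue.
 * "pkt" and "pktlen" below stand for an already-formatted SDMA packet and
 * are assumptions, not driver names:
 *
 *	struct qib_cmd cmd = { .type = QIB_CMD_RECV_CTRL,
 *			       .cmd.recv_ctrl = 1 };	// re-enable receive
 *	write(fd, &cmd, sizeof(cmd));
 *
 *	struct iovec iov = { .iov_base = pkt, .iov_len = pktlen };
 *	writev(fd, &iov, 1);
 */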
2278 static struct class *qib_class;
2279 static dev_t qib_dev;
2281 int qib_cdev_init(int minor, const char *name,
2282 const struct file_operations *fops,
2283 struct cdev **cdevp, struct device **devp)
2285 const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
2287 struct device *device = NULL;
2290 cdev = cdev_alloc();
2292 pr_err("Could not allocate cdev for minor %d, %s\n",
2298 cdev->owner = THIS_MODULE;
2300 kobject_set_name(&cdev->kobj, name);
2302 ret = cdev_add(cdev, dev, 1);
2304 pr_err("Could not add cdev for minor %d, %s (err %d)\n",
2309 device = device_create(qib_class, NULL, dev, NULL, "%s", name);
	if (!IS_ERR(device))
		goto done;
2312 ret = PTR_ERR(device);
2314 pr_err("Could not create device for minor %d, %s (err %d)\n",
2325 void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
2327 struct device *device = *devp;
2330 device_unregister(device);
2340 static struct cdev *wildcard_cdev;
2341 static struct device *wildcard_device;
2343 int __init qib_dev_init(void)
2347 ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
2349 pr_err("Could not allocate chrdev region (err %d)\n", -ret);
2353 qib_class = class_create(THIS_MODULE, "ipath");
2354 if (IS_ERR(qib_class)) {
2355 ret = PTR_ERR(qib_class);
2356 pr_err("Could not create device class (err %d)\n", -ret);
2357 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2364 void qib_dev_cleanup(void)
2367 class_destroy(qib_class);
2371 unregister_chrdev_region(qib_dev, QIB_NMINORS);
2374 static atomic_t user_count = ATOMIC_INIT(0);
2376 static void qib_user_remove(struct qib_devdata *dd)
2378 if (atomic_dec_return(&user_count) == 0)
2379 qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2381 qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2384 static int qib_user_add(struct qib_devdata *dd)
2389 if (atomic_inc_return(&user_count) == 1) {
2390 ret = qib_cdev_init(0, "ipath", &qib_file_ops,
2391 &wildcard_cdev, &wildcard_device);
2396 snprintf(name, sizeof(name), "ipath%d", dd->unit);
2397 ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
2398 &dd->user_cdev, &dd->user_device);
2400 qib_user_remove(dd);
2406 * Create per-unit files in /dev
2408 int qib_device_create(struct qib_devdata *dd)
2412 r = qib_user_add(dd);
2413 ret = qib_diag_add(dd);
2420 * Remove per-unit files in /dev
2421 * void, core kernel returns no errors for this stuff
2423 void qib_device_remove(struct qib_devdata *dd)
2425 qib_user_remove(dd);
2426 qib_diag_remove(dd);