/*
 * Copyright (c) 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/overflow.h>

#include "qib.h"
#include "qib_user_sdma.h"
/* minimum size of header */
#define QIB_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
/* attempt to drain the queue for 5 secs */
#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
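/*
 * Editorial note (not from the original source): the drain loop in
 * qib_user_sdma_queue_drain() below makes up to QIB_USER_SDMA_DRAIN_TIMEOUT
 * passes and sleeps roughly 20 ms between passes, which is where the
 * ~5 second figure above comes from.
 */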
/*
 * Track how many times a process has opened this driver.
 */
static struct rb_root qib_user_sdma_rb_root = RB_ROOT;

struct qib_user_sdma_rb_node {
        pid_t pid;
        struct rb_node node;
        int refcount;
};
struct qib_user_sdma_pkt {
        struct list_head list;  /* list element */

        u8  tiddma;             /* if this is NEW tid-sdma */
        u8  largepkt;           /* this is a large pkt, allocated with kmalloc */
        u16 frag_size;          /* frag size used by PSM */
        u16 index;              /* last header index or push index */
        u16 naddr;              /* dimension of addr (1..3) ... */
        u16 addrlimit;          /* addr array size */
        u16 tidsmidx;           /* current tidsm index */
        u16 tidsmcount;         /* tidsm array item count */
        u16 payload_size;       /* payload size so far for header */
        u32 bytes_togo;         /* bytes for processing */
        u32 counter;            /* sdma pkts queued counter for this entry */
        struct qib_tid_session_member *tidsm;   /* tid session member array */
        struct qib_user_sdma_queue *pq;         /* which pq this pkt belongs to */
        u64 added;              /* global descq number of entries */

        struct {
                u16 offset;                     /* offset for kvaddr, addr */
                u16 length;                     /* length in page */
                u16 first_desc;                 /* first desc */
                u16 last_desc;                  /* last desc */
                u16 put_page;                   /* should we put_page? */
                u16 dma_mapped;                 /* is page dma_mapped? */
                u16 dma_length;                 /* for dma_unmap_page() */
                u16 padding;
                struct page *page;              /* may be NULL (coherent mem) */
                void *kvaddr;                   /* FIXME: only for pio hack */
                dma_addr_t addr;
        } addr[4];                              /* max pages, any more and we coalesce */
};
struct qib_user_sdma_queue {
        /*
         * pkts sent to the dma engine are queued on this
         * list head.  the type of the elements of this
         * list are struct qib_user_sdma_pkt...
         */
        struct list_head sent;

        /*
         * Because the above list will be accessed by both the process and
         * the interrupt handler, we need a spinlock for it.
         */
        spinlock_t sent_lock ____cacheline_aligned_in_smp;

        /* headers with expected length are allocated from here... */
        char header_cache_name[64];
        struct dma_pool *header_cache;

        /* packets are allocated from the slab cache... */
        char pkt_slab_name[64];
        struct kmem_cache *pkt_slab;

        /* as packets are queued, they are counted... */
        u32 counter;
        u32 sent_counter;
        /* pending packets, not sending yet */
        u32 num_pending;
        /* sending packets, not complete yet */
        u32 num_sending;
        /* global descq number of entry of last sending packet */
        u64 added;

        struct rb_root dma_pages_root;

        struct qib_user_sdma_rb_node *sdma_rb_node;

        /* protect everything above... */
        struct mutex lock;
} ____cacheline_aligned_in_smp;
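/*
 * Locking summary (editorial note, derived from the code below): pq->lock
 * serializes user submissions and cleanup for one queue, pq->sent_lock
 * protects the sent list that the interrupt path also updates in
 * qib_user_sdma_send_desc(), and ppd->sdma_lock protects the hardware
 * descriptor queue shared by all user queues on a port.
 */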
static struct qib_user_sdma_rb_node *
qib_user_sdma_rb_search(struct rb_root *root, pid_t pid)
{
        struct qib_user_sdma_rb_node *sdma_rb_node;
        struct rb_node *node = root->rb_node;

        while (node) {
                sdma_rb_node = container_of(node,
                        struct qib_user_sdma_rb_node, node);
                if (pid < sdma_rb_node->pid)
                        node = node->rb_left;
                else if (pid > sdma_rb_node->pid)
                        node = node->rb_right;
                else
                        return sdma_rb_node;
        }

        return NULL;
}
static int
qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new)
{
        struct rb_node **node = &(root->rb_node);
        struct rb_node *parent = NULL;
        struct qib_user_sdma_rb_node *got;

        while (*node) {
                got = container_of(*node, struct qib_user_sdma_rb_node, node);
                parent = *node;
                if (new->pid < got->pid)
                        node = &((*node)->rb_left);
                else if (new->pid > got->pid)
                        node = &((*node)->rb_right);
                else
                        return 0;
        }

        rb_link_node(&new->node, parent, node);
        rb_insert_color(&new->node, root);
        return 1;
}
struct qib_user_sdma_queue *
qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
{
        struct qib_user_sdma_queue *pq =
                kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL);
        struct qib_user_sdma_rb_node *sdma_rb_node;

        if (!pq)
                goto done;

        pq->counter = 0;
        pq->sent_counter = 0;
        pq->num_pending = 0;
        pq->num_sending = 0;
        pq->added = 0;
        pq->sdma_rb_node = NULL;

        INIT_LIST_HEAD(&pq->sent);
        spin_lock_init(&pq->sent_lock);
        mutex_init(&pq->lock);

        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "qib-user-sdma-pkts-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
                                         sizeof(struct qib_user_sdma_pkt),
                                         0, 0, NULL);
        if (!pq->pkt_slab)
                goto err_kfree;

        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "qib-user-sdma-headers-%u-%02u.%02u", unit, ctxt, sctxt);
        pq->header_cache = dma_pool_create(pq->header_cache_name,
                                           dev,
                                           QIB_USER_SDMA_EXP_HEADER_LENGTH,
                                           4, 0);
        if (!pq->header_cache)
                goto err_slab;

        pq->dma_pages_root = RB_ROOT;

        sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root,
                                               current->pid);
        if (sdma_rb_node) {
                sdma_rb_node->refcount++;
        } else {
                int ret;

                sdma_rb_node = kmalloc(sizeof(
                        struct qib_user_sdma_rb_node), GFP_KERNEL);
                if (!sdma_rb_node)
                        goto err_rb;

                sdma_rb_node->refcount = 1;
                sdma_rb_node->pid = current->pid;

                ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root,
                                              sdma_rb_node);
                BUG_ON(ret == 0);
        }
        pq->sdma_rb_node = sdma_rb_node;

        goto done;

err_rb:
        dma_pool_destroy(pq->header_cache);
err_slab:
        kmem_cache_destroy(pq->pkt_slab);
err_kfree:
        kfree(pq);
        pq = NULL;
done:
        return pq;
}
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
                                    int i, u16 offset, u16 len,
                                    u16 first_desc, u16 last_desc,
                                    u16 put_page, u16 dma_mapped,
                                    struct page *page, void *kvaddr,
                                    dma_addr_t dma_addr, u16 dma_length)
{
        pkt->addr[i].offset = offset;
        pkt->addr[i].length = len;
        pkt->addr[i].first_desc = first_desc;
        pkt->addr[i].last_desc = last_desc;
        pkt->addr[i].put_page = put_page;
        pkt->addr[i].dma_mapped = dma_mapped;
        pkt->addr[i].page = page;
        pkt->addr[i].kvaddr = kvaddr;
        pkt->addr[i].addr = dma_addr;
        pkt->addr[i].dma_length = dma_length;
}
static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
                                        size_t len, dma_addr_t *dma_addr)
{
        void *hdr;

        if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
                hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
                                     dma_addr);
        else
                hdr = NULL;

        if (!hdr) {
                hdr = kmalloc(len, GFP_KERNEL);
                if (!hdr)
                        return NULL;

                *dma_addr = 0;
        }

        return hdr;
}
static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
                                       struct qib_user_sdma_queue *pq,
                                       struct qib_user_sdma_pkt *pkt,
                                       struct page *page, u16 put,
                                       u16 offset, u16 len, void *kvaddr)
{
        __le16 *pbc16;
        void *pbcvaddr = NULL;
        struct qib_message_header *hdr;
        u16 newlen, pbclen, lastdesc, dma_mapped;
        u32 vcto;
        union qib_seqnum seqnum;
        dma_addr_t pbcdaddr;
        dma_addr_t dma_addr =
                dma_map_page(&dd->pcidev->dev,
                        page, offset, len, DMA_TO_DEVICE);
        int ret = 0;

        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                /*
                 * dma mapping error, pkt has not managed
                 * this page yet, return the page here so
                 * the caller can ignore this page.
                 */
                if (put) {
                        put_page(page);
                } else {
                        /* coalesce case */
                        kunmap(page);
                        __free_page(page);
                }
                ret = -ENOMEM;
                goto done;
        }

        offset = 0;
        dma_mapped = 1;

next_fragment:
        /*
         * In tid-sdma, the transfer length is restricted by the
         * receiver side's current tid page length.
         */
        if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
                newlen = pkt->tidsm[pkt->tidsmidx].length;
        else
                newlen = len;

        /*
         * Then the transfer length is restricted by MTU.
         * The last descriptor flag is determined by:
         * 1. the current packet is at frag size length.
         * 2. the current tid page is done if tid-sdma.
         * 3. there are no more bytes togo if sdma.
         */
        lastdesc = 0;
        if ((pkt->payload_size + newlen) >= pkt->frag_size) {
                newlen = pkt->frag_size - pkt->payload_size;
                lastdesc = 1;
        } else if (pkt->tiddma) {
                if (newlen == pkt->tidsm[pkt->tidsmidx].length)
                        lastdesc = 1;
        } else {
                if (newlen == pkt->bytes_togo)
                        lastdesc = 1;
        }

        /* fill the next fragment in this page */
        qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
                offset, newlen,         /* offset, len */
                0, lastdesc,            /* first last desc */
                put, dma_mapped,        /* put page, dma mapped */
                page, kvaddr,           /* struct page, virt addr */
                dma_addr, len);         /* dma addr, dma length */
        pkt->bytes_togo -= newlen;
        pkt->payload_size += newlen;
        pkt->naddr++;
        if (pkt->naddr == pkt->addrlimit) {
                ret = -EFAULT;
                goto done;
        }
        /* If there are no more bytes togo (lastdesc==1). */
        if (pkt->bytes_togo == 0) {
                /* The packet is done, header is not dma mapped yet.
                 * it should be from kmalloc */
                if (!pkt->addr[pkt->index].addr) {
                        pkt->addr[pkt->index].addr =
                                dma_map_single(&dd->pcidev->dev,
                                        pkt->addr[pkt->index].kvaddr,
                                        pkt->addr[pkt->index].dma_length,
                                        DMA_TO_DEVICE);
                        if (dma_mapping_error(&dd->pcidev->dev,
                                        pkt->addr[pkt->index].addr)) {
                                ret = -ENOMEM;
                                goto done;
                        }
                        pkt->addr[pkt->index].dma_mapped = 1;
                }

                goto done;
        }

        /* If tid-sdma, advance tid info. */
        if (pkt->tiddma) {
                pkt->tidsm[pkt->tidsmidx].length -= newlen;
                if (pkt->tidsm[pkt->tidsmidx].length) {
                        pkt->tidsm[pkt->tidsmidx].offset += newlen;
                } else {
                        pkt->tidsmidx++;
                        if (pkt->tidsmidx == pkt->tidsmcount) {
                                ret = -EFAULT;
                                goto done;
                        }
                }
        }

        /*
         * If this is NOT the last descriptor (newlen==len),
         * the current packet is not done yet, but the current
         * send side page is done.
         */
        if (lastdesc == 0)
                goto done;

        /*
         * If running this driver under PSM with message size
         * fitting into one transfer unit, it is not possible
         * to reach this line; otherwise, it is a bug.
         */

        /*
         * Since the current packet is done, and there are more
         * bytes togo, we need to create a new sdma header, copying
         * from the previous sdma header and modifying both.
         */
        pbclen = pkt->addr[pkt->index].length;
        pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
        if (!pbcvaddr) {
                ret = -ENOMEM;
                goto done;
        }
        /* Copy the previous sdma header to new sdma header */
        pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
        memcpy(pbcvaddr, pbc16, pbclen);

        /* Modify the previous sdma header */
        hdr = (struct qib_message_header *)&pbc16[4];

        /* New pbc length */
        pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));

        /* New packet length */
        hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

        if (pkt->tiddma) {
                /* turn on the header suppression */
                hdr->iph.pkt_flags =
                        cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
                /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
                hdr->flags &= ~(0x04|0x20);
        } else {
                /* turn off extra bytes: 20-21 bits */
                hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
                /* turn off ACK_REQ: 0x04 */
                hdr->flags &= ~(0x04);
        }

        /* New kdeth checksum */
        vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
        hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
                be16_to_cpu(hdr->lrh[2]) -
                ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
                le16_to_cpu(hdr->iph.pkt_flags));
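        /*
         * Worked example of the checksum arithmetic above (hypothetical
         * values, not from the original source): with lrh[2] = 0x0020,
         * vcto = 0x00050010 and pkt_flags = 0x0002, the stored checksum is
         * QIB_LRH_BTH + 0x20 - 0x5 - 0x10 - 0x2.
         */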
        /* The packet is done, header is not dma mapped yet.
         * it should be from kmalloc */
        if (!pkt->addr[pkt->index].addr) {
                pkt->addr[pkt->index].addr =
                        dma_map_single(&dd->pcidev->dev,
                                pkt->addr[pkt->index].kvaddr,
                                pkt->addr[pkt->index].dma_length,
                                DMA_TO_DEVICE);
                if (dma_mapping_error(&dd->pcidev->dev,
                                pkt->addr[pkt->index].addr)) {
                        ret = -ENOMEM;
                        goto done;
                }
                pkt->addr[pkt->index].dma_mapped = 1;
        }

        /* Modify the new sdma header */
        pbc16 = (__le16 *)pbcvaddr;
        hdr = (struct qib_message_header *)&pbc16[4];

        /* New pbc length */
        pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));

        /* New packet length */
        hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));

        if (pkt->tiddma) {
                /* Set new tid and offset for new sdma header */
                hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
                        (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
                        (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
                        (pkt->tidsm[pkt->tidsmidx].offset>>2));
        } else {
                /* Middle protocol new packet offset */
                hdr->uwords[2] += pkt->payload_size;
        }

        /* New kdeth checksum */
        vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
        hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
                be16_to_cpu(hdr->lrh[2]) -
                ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
                le16_to_cpu(hdr->iph.pkt_flags));

        /* Next sequence number in new sdma header */
        seqnum.val = be32_to_cpu(hdr->bth[2]);
        if (pkt->tiddma)
                seqnum.seq++;
        else
                seqnum.pkt++;
        hdr->bth[2] = cpu_to_be32(seqnum.val);

        /* Init new sdma header. */
        qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
                0, pbclen,              /* offset, len */
                1, 0,                   /* first last desc */
                0, 0,                   /* put page, dma mapped */
                NULL, pbcvaddr,         /* struct page, virt addr */
                pbcdaddr, pbclen);      /* dma addr, dma length */
        pkt->index = pkt->naddr;
        pkt->payload_size = 0;
        pkt->naddr++;
        if (pkt->naddr == pkt->addrlimit) {
                ret = -EFAULT;
                goto done;
        }

        /* Prepare for next fragment in this page */
        if (newlen != len) {
                if (dma_mapped) {
                        put = 0;
                        dma_mapped = 0;
                        page = NULL;
                        kvaddr = NULL;
                }
                len -= newlen;
                offset += newlen;
                goto next_fragment;
        }

done:
        return ret;
}
/* we have too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
                                  struct qib_user_sdma_queue *pq,
                                  struct qib_user_sdma_pkt *pkt,
                                  const struct iovec *iov,
                                  unsigned long niov)
{
        int ret = 0;
        struct page *page = alloc_page(GFP_KERNEL);
        void *mpage_save;
        char *mpage;
        int i;
        int len = 0;

        if (!page) {
                ret = -ENOMEM;
                goto done;
        }

        mpage = kmap(page);
        mpage_save = mpage;
        for (i = 0; i < niov; i++) {
                int cfur;

                cfur = copy_from_user(mpage,
                                      iov[i].iov_base, iov[i].iov_len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_unmap;
                }

                mpage += iov[i].iov_len;
                len += iov[i].iov_len;
        }

        ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
                        page, 0, 0, len, mpage_save);
        goto done;

free_unmap:
        kunmap(page);
        __free_page(page);
done:
        return ret;
}
/*
 * How many pages in this iovec element?
 */
static size_t qib_user_sdma_num_pages(const struct iovec *iov)
{
        const unsigned long addr = (unsigned long) iov->iov_base;
        const unsigned long len = iov->iov_len;
        const unsigned long spage = addr & PAGE_MASK;
        const unsigned long epage = (addr + len - 1) & PAGE_MASK;

        return 1 + ((epage - spage) >> PAGE_SHIFT);
}
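/*
 * Worked example for qib_user_sdma_num_pages() above (illustrative values):
 * with 4 KiB pages, iov_base = 0x1ff8 and iov_len = 0x10 give
 * spage = 0x1000 and epage = 0x2000, so the element spans
 * 1 + ((0x2000 - 0x1000) >> PAGE_SHIFT) = 2 pages even though it is only
 * 16 bytes long.
 */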
static void qib_user_sdma_free_pkt_frag(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct qib_user_sdma_pkt *pkt,
                                        int frag)
{
        const int i = frag;

        if (pkt->addr[i].page) {
                /* only user data has page */
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
                                       pkt->addr[i].dma_length,
                                       DMA_TO_DEVICE);

                if (pkt->addr[i].kvaddr)
                        kunmap(pkt->addr[i].page);

                if (pkt->addr[i].put_page)
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
        } else if (pkt->addr[i].kvaddr) {
                /* for headers */
                if (pkt->addr[i].dma_mapped) {
                        /* from kmalloc & dma mapped */
                        dma_unmap_single(dev,
                                         pkt->addr[i].addr,
                                         pkt->addr[i].dma_length,
                                         DMA_TO_DEVICE);
                        kfree(pkt->addr[i].kvaddr);
                } else if (pkt->addr[i].addr) {
                        /* free coherent mem from cache... */
                        dma_pool_free(pq->header_cache,
                                      pkt->addr[i].kvaddr, pkt->addr[i].addr);
                } else {
                        /* from kmalloc but not dma mapped */
                        kfree(pkt->addr[i].kvaddr);
                }
        }
}
/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
                                   struct qib_user_sdma_queue *pq,
                                   struct qib_user_sdma_pkt *pkt,
                                   unsigned long addr, int tlen, size_t npages)
{
        struct page *pages[8];
        int i, j;
        int ret = 0;

        while (npages) {
                if (npages > 8)
                        j = 8;
                else
                        j = npages;

                ret = get_user_pages_fast(addr, j, 0, pages);
                if (ret != j) {
                        i = 0;
                        j = ret;
                        ret = -ENOMEM;
                        goto free_pages;
                }

                for (i = 0; i < j; i++) {
                        /* map the pages... */
                        unsigned long fofs = addr & ~PAGE_MASK;
                        int flen = ((fofs + tlen) > PAGE_SIZE) ?
                                (PAGE_SIZE - fofs) : tlen;
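                        /*
                         * Illustrative note (hypothetical values): with
                         * 4 KiB pages, a buffer starting at 0x1ff0 with
                         * tlen = 0x30 gives fofs = 0xff0 for the first page,
                         * so flen = PAGE_SIZE - 0xff0 = 0x10; the remaining
                         * 0x20 bytes begin page aligned on the next page.
                         */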
                        ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
                                pages[i], 1, fofs, flen, NULL);
                        if (ret < 0) {
                                /* current page has been taken
                                 * care of inside above call.
                                 */
                                i++;
                                goto free_pages;
                        }

                        addr += flen;
                        tlen -= flen;
                }

                npages -= j;
        }

        goto done;

free_pages:
        /* if error, return all pages not managed by pkt */
        while (i < j)
                put_page(pages[i++]);

done:
        return ret;
}
static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
                                 struct qib_user_sdma_queue *pq,
                                 struct qib_user_sdma_pkt *pkt,
                                 const struct iovec *iov,
                                 unsigned long niov)
{
        int ret = 0;
        unsigned long idx;

        for (idx = 0; idx < niov; idx++) {
                const size_t npages = qib_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;

                ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
                                              iov[idx].iov_len, npages);
                if (ret < 0)
                        goto free_pkt;
        }

        goto done;

free_pkt:
        /* we need to ignore the first entry here */
        for (idx = 1; idx < pkt->naddr; idx++)
                qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

        /* need to dma unmap the first entry, this is to restore to
         * the original state so that the caller can free the memory in
         * the error condition. The caller does not know whether it is
         * dma mapped or not. */
        if (pkt->addr[0].dma_mapped) {
                dma_unmap_single(&dd->pcidev->dev,
                        pkt->addr[0].addr,
                        pkt->addr[0].dma_length,
                        DMA_TO_DEVICE);
                pkt->addr[0].addr = 0;
                pkt->addr[0].dma_mapped = 0;
        }

done:
        return ret;
}
static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
                                      struct qib_user_sdma_queue *pq,
                                      struct qib_user_sdma_pkt *pkt,
                                      const struct iovec *iov,
                                      unsigned long niov, int npages)
{
        int ret = 0;

        if (pkt->frag_size == pkt->bytes_togo &&
                        npages >= ARRAY_SIZE(pkt->addr))
                ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
        else
                ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

        return ret;
}
/* free a packet list */
static void qib_user_sdma_free_pkt_list(struct device *dev,
                                        struct qib_user_sdma_queue *pq,
                                        struct list_head *list)
{
        struct qib_user_sdma_pkt *pkt, *pkt_next;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
                int i;

                for (i = 0; i < pkt->naddr; i++)
                        qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);

                if (pkt->largepkt)
                        kfree(pkt);
                else
                        kmem_cache_free(pq->pkt_slab, pkt);
        }
        INIT_LIST_HEAD(list);
}
/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the
 * number of bytes total.  list must be empty initially,
 * as, if there is an error, we clean it...
 */
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
                                    struct qib_pportdata *ppd,
                                    struct qib_user_sdma_queue *pq,
                                    const struct iovec *iov,
                                    unsigned long niov,
                                    struct list_head *list,
                                    int *maxpkts, int *ndesc)
{
        unsigned long idx = 0;
        int ret = 0;
        int npkts = 0;
        __le32 *pbc;
        dma_addr_t dma_addr;
        struct qib_user_sdma_pkt *pkt = NULL;
        size_t len;
        size_t nw;
        u32 counter = pq->counter;
        u16 frag_size;

        while (idx < niov && npkts < *maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
                size_t npages = 0;
                size_t bytes_togo = 0;
                int tiddma = 0;
                int cfur;

                len = iov[idx].iov_len;
                nw = len >> 2;

                if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
                        goto free_list;
                }
                pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
                if (!pbc) {
                        ret = -ENOMEM;
                        goto free_list;
                }

                cfur = copy_from_user(pbc, iov[idx].iov_base, len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_pbc;
                }

                /*
                 * This assignment is a bit strange.  it's because
                 * the pbc counts the number of 32 bit words in the full
                 * packet _except_ the first word of the pbc itself...
                 */
                pktnwc = nw - 1;

                /*
                 * pktnw computation yields the number of 32 bit words
                 * that the caller has indicated in the PBC.  note that
                 * this is one less than the total number of words that
                 * goes to the send DMA engine as the first 32 bit word
                 * of the PBC itself is not counted.  Armed with this count,
                 * we can verify that the packet is consistent with the
                 * iovec lengths.
                 */
                pktnw = le32_to_cpu(*pbc) & 0xFFFF;
                if (pktnw < pktnwc) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                idx++;
                while (pktnwc < pktnw && idx < niov) {
                        const size_t slen = iov[idx].iov_len;
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;

                        if (slen & 3 || faddr & 3 || !slen) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }

                        npages += qib_user_sdma_num_pages(&iov[idx]);

                        if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
                            bytes_togo > type_max(typeof(pkt->bytes_togo))) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }
                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
                }

                if (pktnwc != pktnw) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
                if (((frag_size ? frag_size : bytes_togo) + len) >
                                                ppd->ibmaxlen) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                if (frag_size) {
                        size_t tidsmsize, n, pktsize, sz, addrlimit;

                        n = npages*((2*PAGE_SIZE/frag_size)+1);
                        pktsize = struct_size(pkt, addr, n);
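                        /*
                         * Illustrative note (hypothetical numbers): with
                         * PAGE_SIZE = 4096, frag_size = 2048 and npages = 3,
                         * n = 3 * ((2 * 4096 / 2048) + 1) = 15, so room for
                         * 15 extra addr[] entries is requested on top of the
                         * base struct; struct_size() saturates rather than
                         * wrapping if the calculation would overflow.
                         */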
                        /*
                         * Determine if this is tid-sdma or just sdma.
                         */
                        tiddma = (((le32_to_cpu(pbc[7])>>
                                QLOGIC_IB_I_TID_SHIFT)&
                                QLOGIC_IB_I_TID_MASK) !=
                                QLOGIC_IB_I_TID_MASK);

                        if (tiddma)
                                tidsmsize = iov[idx].iov_len;
                        else
                                tidsmsize = 0;

                        if (check_add_overflow(pktsize, tidsmsize, &sz)) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }
                        pkt = kmalloc(sz, GFP_KERNEL);
                        if (!pkt) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
                        pkt->largepkt = 1;
                        pkt->frag_size = frag_size;
                        if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
                                               &addrlimit) ||
                            addrlimit > type_max(typeof(pkt->addrlimit))) {
                                ret = -EINVAL;
                                goto free_pkt;
                        }
                        pkt->addrlimit = addrlimit;

                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;

                                cfur = copy_from_user(tidsm,
                                        iov[idx].iov_base, tidsmsize);
                                if (cfur) {
                                        ret = -EFAULT;
                                        goto free_pkt;
                                }
                                pkt->tidsm =
                                        (struct qib_tid_session_member *)tidsm;
                                pkt->tidsmcount = tidsmsize/
                                        sizeof(struct qib_tid_session_member);
                                pkt->tidsmidx = 0;
                                idx++;
                        }

                        /*
                         * pbc 'fill1' field is borrowed to pass frag size,
                         * we need to clear it after picking the frag size;
                         * the hardware requires this field to be zero.
                         */
                        *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
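                        /*
                         * Layout note (summarising the code above, not taken
                         * from the original comments): the first PBC word
                         * carries the packet length in 32-bit words in bits
                         * [15:0] and the frag size in bits [31:16] (the
                         * borrowed 'fill1' field); the mask with 0x0000FFFF
                         * clears the upper half before the word reaches the
                         * hardware.
                         */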
                } else {
                        pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
                        if (!pkt) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
                        pkt->largepkt = 0;
                        pkt->frag_size = bytes_togo;
                        pkt->addrlimit = ARRAY_SIZE(pkt->addr);
                }
                pkt->bytes_togo = bytes_togo;
                pkt->payload_size = 0;
                pkt->counter = counter;
                pkt->tiddma = tiddma;

                /* setup the first header */
                qib_user_sdma_init_frag(pkt, 0, /* index */
                        0, len,         /* offset, len */
                        1, 0,           /* first last desc */
                        0, 0,           /* put page, dma mapped */
                        NULL, pbc,      /* struct page, virt addr */
                        dma_addr, len); /* dma addr, dma length */
                pkt->index = 0;
                pkt->naddr = 1;

                if (nfrags) {
                        ret = qib_user_sdma_init_payload(dd, pq, pkt,
                                        iov + idx_save + 1,
                                        idx - idx_save - 1, npages);
                        if (ret < 0)
                                goto free_pkt;
                } else {
                        /* since there is no payload, mark the
                         * header as the last desc. */
                        pkt->addr[0].last_desc = 1;

                        if (dma_addr == 0) {
                                /*
                                 * the header is not dma mapped yet.
                                 * it should be from kmalloc.
                                 */
                                dma_addr = dma_map_single(&dd->pcidev->dev,
                                        pbc, len, DMA_TO_DEVICE);
                                if (dma_mapping_error(&dd->pcidev->dev,
                                                      dma_addr)) {
                                        ret = -ENOMEM;
                                        goto free_pkt;
                                }
                                pkt->addr[0].addr = dma_addr;
                                pkt->addr[0].dma_mapped = 1;
                        }
                }

                counter++;
                npkts++;
                pkt->pq = pq;
                pkt->index = 0; /* reset index for push on hw */
                *ndesc += pkt->naddr;

                list_add_tail(&pkt->list, list);
        }

        *maxpkts = npkts;
        ret = idx;
        goto done;

free_pkt:
        if (pkt->largepkt)
                kfree(pkt);
        else
                kmem_cache_free(pq->pkt_slab, pkt);
free_pbc:
        if (dma_addr)
                dma_pool_free(pq->header_cache, pbc, dma_addr);
        else
                kfree(pbc);
free_list:
        qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
        return ret;
}
static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
                                               u32 c)
{
        pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
                                     struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        struct list_head free_list;
        struct qib_user_sdma_pkt *pkt;
        struct qib_user_sdma_pkt *pkt_prev;
        unsigned long flags;
        int ret = 0;

        if (!pq->num_sending)
                return 0;

        INIT_LIST_HEAD(&free_list);

        /*
         * We need this spin lock here because the interrupt handler
         * might modify this list in qib_user_sdma_send_desc(), also
         * we can not get interrupted, otherwise it is a deadlock.
         */
        spin_lock_irqsave(&pq->sent_lock, flags);
        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                s64 descd = ppd->sdma_descq_removed - pkt->added;

                if (descd < 0)
                        break;

                list_move_tail(&pkt->list, &free_list);

                /* one more packet cleaned */
                ret++;
                pq->num_sending--;
        }
        spin_unlock_irqrestore(&pq->sent_lock, flags);

        if (!list_empty(&free_list)) {
                u32 counter;

                pkt = list_entry(free_list.prev,
                                 struct qib_user_sdma_pkt, list);
                counter = pkt->counter;

                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                qib_user_sdma_set_complete_counter(pq, counter);
        }

        return ret;
}
void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
{
        if (!pq)
                return;

        pq->sdma_rb_node->refcount--;
        if (pq->sdma_rb_node->refcount == 0) {
                rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
                kfree(pq->sdma_rb_node);
        }
        dma_pool_destroy(pq->header_cache);
        kmem_cache_destroy(pq->pkt_slab);
        kfree(pq);
}
/* clean descriptor queue, returns > 0 if some elements cleaned */
static int qib_user_sdma_hwqueue_clean(struct qib_pportdata *ppd)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&ppd->sdma_lock, flags);
        ret = qib_sdma_make_progress(ppd);
        spin_unlock_irqrestore(&ppd->sdma_lock, flags);

        return ret;
}
/* we're in close, drain packets so that we can clean up successfully... */
void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                               struct qib_user_sdma_queue *pq)
{
        struct qib_devdata *dd = ppd->dd;
        unsigned long flags;
        int i;

        if (!pq)
                return;

        for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
                mutex_lock(&pq->lock);
                if (!pq->num_pending && !pq->num_sending) {
                        mutex_unlock(&pq->lock);
                        break;
                }
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
                msleep(20);
        }

        if (pq->num_pending || pq->num_sending) {
                struct qib_user_sdma_pkt *pkt;
                struct qib_user_sdma_pkt *pkt_prev;
                struct list_head free_list;

                mutex_lock(&pq->lock);
                spin_lock_irqsave(&ppd->sdma_lock, flags);
                /*
                 * Since we hold sdma_lock, it is safe without sent_lock.
                 */
                if (pq->num_pending) {
                        list_for_each_entry_safe(pkt, pkt_prev,
                                        &ppd->sdma_userpending, list) {
                                if (pkt->pq == pq) {
                                        list_move_tail(&pkt->list, &pq->sent);
                                        pq->num_pending--;
                                        pq->num_sending++;
                                }
                        }
                }
                spin_unlock_irqrestore(&ppd->sdma_lock, flags);

                qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
                list_splice_init(&pq->sent, &free_list);
                pq->num_sending = 0;
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                mutex_unlock(&pq->lock);
        }
}
static inline __le64 qib_sdma_make_desc0(u8 gen,
                                         u64 addr, u64 dwlen, u64 dwoffset)
{
        return cpu_to_le64(/* SDmaPhyAddr[31:0] */
                           ((addr & 0xfffffffcULL) << 32) |
                           /* SDmaGeneration[1:0] */
                           ((gen & 3ULL) << 30) |
                           /* SDmaDwordCount[10:0] */
                           ((dwlen & 0x7ffULL) << 16) |
                           /* SDmaBufOffset[12:2] */
                           (dwoffset & 0x7ffULL));
}

static inline __le64 qib_sdma_make_first_desc0(__le64 descq)
{
        return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 qib_sdma_make_last_desc0(__le64 descq)
{
        /* last */ /* dma head */
        return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 qib_sdma_make_desc1(u64 addr)
{
        /* SDmaPhyAddr[47:32] */
        return cpu_to_le64(addr >> 32);
}
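/*
 * Summary of descriptor word 0 as built above and flagged in
 * qib_user_sdma_send_frag()/qib_user_sdma_send_desc() below (derived from
 * this file, not from hardware documentation):
 *   bits [63:32] SDmaPhyAddr[31:0] (word-aligned low address bits)
 *   bits [31:30] SDmaGeneration
 *   bits [26:16] SDmaDwordCount
 *   bit  15      interrupt request (set on a last descriptor while
 *                ppd->sdma_intrequest is pending)
 *   bit  14      large send buffer (set for packets above the 2 KB PIO size)
 *   bit  13      dma head, bit 12 first descriptor, bit 11 last descriptor
 *   bits [10:0]  SDmaBufOffset[12:2]
 * Descriptor word 1 carries SDmaPhyAddr[47:32].
 */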
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
                                    struct qib_user_sdma_pkt *pkt, int idx,
                                    unsigned ofs, u16 tail, u8 gen)
{
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
        const u64 dwlen = (u64) pkt->addr[idx].length / 4;
        __le64 *descqp;
        __le64 descq0;

        descqp = &ppd->sdma_descq[tail].qw[0];

        descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
        if (pkt->addr[idx].first_desc)
                descq0 = qib_sdma_make_first_desc0(descq0);
        if (pkt->addr[idx].last_desc) {
                descq0 = qib_sdma_make_last_desc0(descq0);
                if (ppd->sdma_intrequest) {
                        descq0 |= cpu_to_le64(1ULL << 15);
                        ppd->sdma_intrequest = 0;
                }
        }

        descqp[0] = descq0;
        descqp[1] = qib_sdma_make_desc1(addr);
}
void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
                             struct list_head *pktlist)
{
        struct qib_devdata *dd = ppd->dd;
        u16 nfree, nsent;
        u16 tail, tail_c;
        u8 gen, gen_c;

        nfree = qib_sdma_descq_freecnt(ppd);
        if (!nfree)
                return;

retry:
        nsent = 0;
        tail_c = tail = ppd->sdma_descq_tail;
        gen_c = gen = ppd->sdma_generation;
        while (!list_empty(pktlist)) {
                struct qib_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct qib_user_sdma_pkt,
                                   list);
                int i, j, c = 0;
                unsigned ofs = 0;
                u16 dtail = tail;

                for (i = pkt->index; i < pkt->naddr && nfree; i++) {
                        qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
                        ofs += pkt->addr[i].length >> 2;

                        if (++tail == ppd->sdma_descq_cnt) {
                                tail = 0;
                                ++gen;
                                ppd->sdma_intrequest = 1;
                        } else if (tail == (ppd->sdma_descq_cnt>>1)) {
                                ppd->sdma_intrequest = 1;
                        }
                        nfree--;
                        if (pkt->addr[i].last_desc == 0)
                                continue;

                        /*
                         * If the packet is >= 2KB mtu equivalent, we
                         * have to use the large buffers, and have to
                         * mark each descriptor as part of a large
                         * buffer packet.
                         */
                        if (ofs > dd->piosize2kmax_dwords) {
                                for (j = pkt->index; j <= i; j++) {
                                        ppd->sdma_descq[dtail].qw[0] |=
                                                cpu_to_le64(1ULL << 14);
                                        if (++dtail == ppd->sdma_descq_cnt)
                                                dtail = 0;
                                }
                        }
                        c += i + 1 - pkt->index;
                        pkt->index = i + 1; /* index for next first */
                        tail_c = dtail = tail;
                        gen_c = gen;
                        ofs = 0;  /* reset for next packet */
                }

                ppd->sdma_descq_added += c;
                nsent += c;
                if (pkt->index == pkt->naddr) {
                        pkt->added = ppd->sdma_descq_added;
                        pkt->pq->added = pkt->added;
                        pkt->pq->num_pending--;
                        spin_lock(&pkt->pq->sent_lock);
                        pkt->pq->num_sending++;
                        list_move_tail(&pkt->list, &pkt->pq->sent);
                        spin_unlock(&pkt->pq->sent_lock);
                }
                if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
                        break;
        }

        /* advance the tail on the chip if necessary */
        if (ppd->sdma_descq_tail != tail_c) {
                ppd->sdma_generation = gen_c;
                dd->f_sdma_update_tail(ppd, tail_c);
        }

        if (nfree && !list_empty(pktlist))
                goto retry;
}
/* pq->lock must be held, get packets on the wire... */
static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
                                   struct qib_user_sdma_queue *pq,
                                   struct list_head *pktlist, int count)
{
        unsigned long flags;

        if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
                return -ECOMM;

        /* non-blocking mode */
        if (pq->sdma_rb_node->refcount > 1) {
                spin_lock_irqsave(&ppd->sdma_lock, flags);
                if (unlikely(!__qib_sdma_running(ppd))) {
                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
                        return -ECOMM;
                }
                pq->num_pending += count;
                list_splice_tail_init(pktlist, &ppd->sdma_userpending);
                qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
                return 0;
        }

        /* In this case, descriptors from this process are not
         * linked to the ppd pending queue, the interrupt handler
         * won't update this process, so it is OK to directly
         * modify without the sdma lock.
         */
        pq->num_pending += count;
        /*
         * Blocking mode for single rail process, we must
         * release/regain sdma_lock to give another process a
         * chance to make progress. This is important for
         * performance.
         */
        do {
                spin_lock_irqsave(&ppd->sdma_lock, flags);
                if (unlikely(!__qib_sdma_running(ppd))) {
                        spin_unlock_irqrestore(&ppd->sdma_lock, flags);
                        return -ECOMM;
                }
                qib_user_sdma_send_desc(ppd, pktlist);
                if (!list_empty(pktlist))
                        qib_sdma_make_progress(ppd);
                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
        } while (!list_empty(pktlist));

        return 0;
}
int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
                         struct qib_user_sdma_queue *pq,
                         const struct iovec *iov,
                         unsigned long dim)
{
        struct qib_devdata *dd = rcd->dd;
        struct qib_pportdata *ppd = rcd->ppd;
        int ret = 0;
        struct list_head list;
        int npkts = 0;

        INIT_LIST_HEAD(&list);

        mutex_lock(&pq->lock);

        /* why not -ECOMM like qib_user_sdma_push_pkts() below? */
        if (!qib_sdma_running(ppd))
                goto done_unlock;

        /* if I have packets not complete yet */
        if (pq->added > ppd->sdma_descq_removed)
                qib_user_sdma_hwqueue_clean(ppd);
        /* if I have complete packets to be freed */
        if (pq->num_sending)
                qib_user_sdma_queue_clean(ppd, pq);

        while (dim) {
                int mxp = 8;
                int ndesc = 0;

                ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
                                iov, dim, &list, &mxp, &ndesc);
                if (ret < 0)
                        goto done_unlock;
                else {
                        dim -= ret;
                        iov += ret;
                }

                /* force packets onto the sdma hw queue... */
                if (!list_empty(&list)) {
                        /*
                         * Lazily clean hw queue.
                         */
                        if (qib_sdma_descq_freecnt(ppd) < ndesc) {
                                qib_user_sdma_hwqueue_clean(ppd);
                                if (pq->num_sending)
                                        qib_user_sdma_queue_clean(ppd, pq);
                        }

                        ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
                        if (ret < 0)
                                goto done_unlock;
                        else {
                                npkts += mxp;
                                pq->counter += mxp;
                        }
                }
        }

done_unlock:
        if (!list_empty(&list))
                qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        mutex_unlock(&pq->lock);

        return (ret < 0) ? ret : npkts;
}
int qib_user_sdma_make_progress(struct qib_pportdata *ppd,
                                struct qib_user_sdma_queue *pq)
{
        int ret = 0;

        mutex_lock(&pq->lock);
        qib_user_sdma_hwqueue_clean(ppd);
        ret = qib_user_sdma_queue_clean(ppd, pq);
        mutex_unlock(&pq->lock);

        return ret;
}

u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
{
        return pq ? pq->sent_counter : 0;
}

u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
{
        return pq ? pq->counter : 0;
}