/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "ipath_kernel.h"
#include "ipath_user_sdma.h"

/* minimum size of header */
#define IPATH_USER_SDMA_MIN_HEADER_LENGTH 64
/* expected size of headers (for dma_pool) */
#define IPATH_USER_SDMA_EXP_HEADER_LENGTH 64
/* length mask in PBC (lower 11 bits) */
#define IPATH_PBC_LENGTH_MASK ((1 << 11) - 1)
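
/*
 * Illustrative note (not from the original source): for a header iovec
 * the driver computes nw = len >> 2 and expects the PBC word count,
 * le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK, to describe the whole
 * packet minus the first PBC word, so a 64-byte header by itself
 * accounts for 64 / 4 - 1 = 15 of those words.
 */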

struct ipath_user_sdma_pkt {
	u8 naddr;		/* dimension of addr (1..3) ... */
	u32 counter;		/* sdma pkts queued counter for this entry */
	u64 added;		/* global descq number of entries */

	struct {
		u32 offset;		/* offset for kvaddr, addr */
		u32 length;		/* length in page */
		u8 put_page;		/* should we put_page? */
		u8 dma_mapped;		/* is page dma_mapped? */
		struct page *page;	/* may be NULL (coherent mem) */
		void *kvaddr;		/* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];		/* max pages, any more and we coalesce */

	struct list_head list;	/* list element */
};
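
/*
 * Illustrative note (not from the original source): slot 0 of addr[]
 * always holds the header fragment, leaving three slots for payload
 * pages; payloads needing more pages than that are copied into a
 * single freshly allocated page by ipath_user_sdma_coalesce() below.
 */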

struct ipath_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct ipath_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};

struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
	struct ipath_user_sdma_queue *pq =
		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);
	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct ipath_user_sdma_pkt),
					 0, 0, NULL);
	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;
	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;
done:
	return pq;
}

static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
				      int i, size_t offset, size_t len,
				      int put_page, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}

static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
					u32 counter, size_t offset,
					size_t len, int dma_mapped,
					struct page *page,
					void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				  kvaddr, dma_addr);
}

/* we've too many pages in the iovec, coalesce to a single page */
static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
				    struct ipath_user_sdma_pkt *pkt,
				    const struct iovec *iov,
				    unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	kunmap(page);
	ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				  dma_addr);
	pkt->naddr = 2;
	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}

/* how many pages in this iovec element? */
static int ipath_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr = (unsigned long) iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}
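
/*
 * Worked example (illustrative, assuming 4K pages): a 100-byte iovec
 * element at address 0x1ff0 starts on page 0x1000 and ends on page
 * 0x2000, so 1 + ((0x2000 - 0x1000) >> PAGE_SHIFT) = 2 pages.
 */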

/* truncate length to page boundary */
static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = offset_in_page(addr);

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

static void ipath_user_sdma_free_pkt_frag(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct ipath_user_sdma_pkt *pkt,
					  int i)
{
	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
				     struct ipath_user_sdma_pkt *pkt,
				     unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages_fast(addr, npages, 0, pages);
	if (ret != npages) {
		int i;

		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen =
			ipath_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = offset_in_page(addr);

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			ret = -ENOMEM;
			goto done;
		}

		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					  pages[j], kmap(pages[j]),
					  dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}
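
/*
 * Illustrative note (not from the original source): pages[2] above is
 * enough because each payload fragment is limited to PAGE_SIZE bytes
 * by ipath_user_sdma_queue_pkts(), so a fragment can straddle at most
 * two pages.
 */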

static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   struct ipath_user_sdma_pkt *pkt,
				   const struct iovec *iov,
				   unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = ipath_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = ipath_user_sdma_pin_pages(dd, pkt,
						addr, iov[idx].iov_len,
						npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
done:
	return ret;
}

static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
					struct ipath_user_sdma_queue *pq,
					struct ipath_user_sdma_pkt *pkt,
					const struct iovec *iov,
					unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}
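
/*
 * Worked example (illustrative): two 1500-byte iovec elements that
 * each straddle a page boundary need four pages, which no longer fits
 * the payload slots of pkt->addr[], so the coalesce path is taken.
 */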

/* free a packet list -- return counter value of last packet */
static void ipath_user_sdma_free_pkt_list(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct list_head *list)
{
	struct ipath_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets to list, returning the
 * number of bytes total.  list must be empty initially,
 * as, if there is an error we clean it...
 */
static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
				      struct ipath_user_sdma_queue *pq,
				      struct list_head *list,
				      const struct iovec *iov,
				      unsigned long niov,
				      int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct ipath_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * this assignment is a bit strange.  it's because the
		 * pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					    page, pbc, dma_addr);

		if (nfrags) {
			ret = ipath_user_sdma_init_payload(dd, pq, pkt,
							   iov + idx_save + 1,
							   nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}
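
/*
 * Worked example (illustrative): a 64-byte header gives nw = 16 and
 * pktnwc = 15; if the PBC word count reads 271, the fragment scan must
 * accumulate exactly (271 - 15) * 4 = 1024 bytes of payload, otherwise
 * the packet is rejected with -EINVAL.
 */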

static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
						 u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
				       struct ipath_user_sdma_queue *pq)
{
	struct list_head free_list;
	struct ipath_user_sdma_pkt *pkt;
	struct ipath_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = dd->ipath_sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct ipath_user_sdma_pkt, list);
		counter = pkt->counter;

		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		ipath_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}
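
/*
 * Illustrative note (not from the original source): completion is
 * detected purely by counters; pkt->added records the global
 * descriptor count when the packet was queued, so once
 * ipath_sdma_descq_removed catches up to it, every descriptor of the
 * packet has been retired by the hardware.
 */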

void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	ret = ipath_sdma_make_progress(dd);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

/* we're in close, drain packets so that we can cleanup successfully... */
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
				 struct ipath_user_sdma_queue *pq)
{
	int i;

	if (!pq)
		return;

	for (i = 0; i < 100; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		printk(KERN_INFO "drain: lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
					   u64 addr, u64 dwlen, u64 dwoffset)
{
	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}
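
/*
 * Illustrative layout note (not from the original source): desc0 packs,
 * from low to high bits, the dword buffer offset [10:0], the last/first/
 * head flags (bits 11/12/13, set by the helpers below), the dword count
 * at bit 16, the generation at bit 30, and SDmaPhyAddr[31:0] in the
 * upper 32 bits.
 */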

static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
{
					      /* last */ /* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 ipath_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}

static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
				      struct ipath_user_sdma_pkt *pkt, int idx,
				      unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &dd->ipath_sdma_descq[tail].qw[0];

	descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = ipath_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = ipath_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = ipath_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
				     struct ipath_user_sdma_queue *pq,
				     struct list_head *pktlist)
{
	int ret = 0;
	unsigned long flags;
	u16 tail;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = dd->ipath_sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct ipath_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct ipath_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == dd->ipath_sdma_descq_cnt) {
				tail = 0;
				++dd->ipath_sdma_generation;
			}
		}

		if ((ofs << 2) > dd->ipath_ibmaxlen) {
			ipath_dbg("packet size %X > ibmax %X, fail\n",
				  ofs << 2, dd->ipath_ibmaxlen);
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * if the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs >= IPATH_SMALLBUF_DWORDS) {
			for (i = 0; i < pkt->naddr; i++) {
				dd->ipath_sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == dd->ipath_sdma_descq_cnt)
					dtail = 0;
			}
		}

		dd->ipath_sdma_descq_added += pkt->naddr;
		pkt->added = dd->ipath_sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (dd->ipath_sdma_descq_tail != tail) {
		wmb();
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
		dd->ipath_sdma_descq_tail = tail;
	}

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

int ipath_user_sdma_writev(struct ipath_devdata *dd,
			   struct ipath_user_sdma_queue *pq,
			   const struct iovec *iov,
			   unsigned long dim)
{
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
	}

	while (dim) {
		const int mxp = 8;

		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
				ipath_user_sdma_hwqueue_clean(dd);
				ipath_user_sdma_queue_clean(dd, pq);
			}

			ret = ipath_user_sdma_push_pkts(dd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}
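
/*
 * Typical lifecycle (illustrative sketch, not from the original source):
 *
 *	pq = ipath_user_sdma_queue_create(&dd->pcidev->dev, unit, port, sport);
 *	ret = ipath_user_sdma_writev(dd, pq, iov, dim);
 *	...
 *	ipath_user_sdma_queue_drain(dd, pq);
 *	ipath_user_sdma_queue_destroy(pq);
 */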

int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
				  struct ipath_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	ipath_user_sdma_hwqueue_clean(dd);
	ret = ipath_user_sdma_queue_clean(dd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
{
	return pq->sent_counter;
}

u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
{
	return pq->counter;
}