 * Copyright(c) 2015, 2016 Intel Corporation.
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include "user_sdma.h"
#include "verbs.h"  /* for the headers */
#include "common.h" /* for struct hfi1_tid_info */
static uint hfi1_sdma_comp_ring_size = 128;
module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
/* The maximum number of Data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8

/*
 * Maximum number of packets to send from each message/request
 * before moving to the next one.
 */
#define MAX_PKTS_PER_QUEUE 16
#define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))

#define req_opcode(x) \
	(((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
#define req_version(x) \
	(((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_VERSION_MASK)
#define req_iovcnt(x) \
	(((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
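/*
 * Illustrative decode of info.ctrl (a sketch only; it assumes the uapi
 * layout in hfi1_user.h where the version occupies bits 0-3, the opcode
 * bits 4-7 and the iovec count bits 8-15):
 *
 *	ctrl = 0x0310;
 *	req_version(ctrl);	// 0x0
 *	req_opcode(ctrl);	// 0x1
 *	req_iovcnt(ctrl);	// 0x03
 */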
/* Number of BTH.PSN bits used for sequence number in expected rcvs */
#define BTH_SEQ_MASK 0x7ffull

/*
 * Define fields in the KDETH header so we can update the header
 */
#define KDETH_OFFSET_SHIFT     0
#define KDETH_OFFSET_MASK      0x7fff
#define KDETH_OM_SHIFT         15
#define KDETH_OM_MASK          0x1
#define KDETH_TID_SHIFT        16
#define KDETH_TID_MASK         0x3ff
#define KDETH_TIDCTRL_SHIFT    26
#define KDETH_TIDCTRL_MASK     0x3
#define KDETH_INTR_SHIFT       28
#define KDETH_INTR_MASK        0x1
#define KDETH_SH_SHIFT         29
#define KDETH_SH_MASK          0x1
#define KDETH_HCRC_UPPER_SHIFT 16
#define KDETH_HCRC_UPPER_MASK  0xff
#define KDETH_HCRC_LOWER_SHIFT 24
#define KDETH_HCRC_LOWER_MASK  0xff

#define AHG_KDETH_INTR_SHIFT 12

#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
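/*
 * Worked example of the two conversion macros above (pure arithmetic,
 * shown only for illustration): for an LRH length of 288 bytes,
 * LRH2PBC(288) = (288 >> 2) + 1 = 73 dwords, and converting back gives
 * PBC2LRH(73) = (73 << 2) - 4 = 288 bytes, i.e. the PBC length is always
 * one dword larger than the LRH byte length expressed in dwords.
 */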
#define KDETH_GET(val, field) \
	(((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
#define KDETH_SET(dw, field, val) do { \
		u32 dwval = le32_to_cpu(dw); \
		dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
		dwval |= (((val) & KDETH_##field##_MASK) << \
			  KDETH_##field##_SHIFT); \
		dw = cpu_to_le32(dwval); \
	} while (0)
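/*
 * Example use of the accessors above (illustrative only; new_off is a
 * hypothetical value already expressed in KDETH.OM units):
 *
 *	u32 tid = KDETH_GET(hdr->kdeth.ver_tid_offset, TID);
 *	KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, new_off);
 *
 * This mirrors how set_txreq_header() below updates the TID fields of a
 * struct hfi1_pkt_header.
 */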
#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
	do { \
		if ((idx) < ARRAY_SIZE((arr))) \
			(arr)[(idx++)] = sdma_build_ahg_descriptor( \
				(__force u16)(value), (dw), (bit), \
				(width)); \
	} while (0)
/* KDETH OM multipliers and switch over point */
#define KDETH_OM_SMALL    4
#define KDETH_OM_LARGE    64
#define KDETH_OM_MAX_SIZE (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
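/*
 * Rough example of how the two OM factors are applied (see
 * set_txreq_header() below): a TID entry covering 8KB stays below
 * KDETH_OM_MAX_SIZE (128KB), so offsets are programmed in KDETH_OM_SMALL
 * (4 byte) units, while a TID entry of 128KB or more switches to
 * KDETH_OM_LARGE (64 byte) units so the 15-bit KDETH.OFFSET field can
 * still address the whole entry.
 */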
/* Last packet in the request */
#define TXREQ_FLAGS_REQ_LAST_PKT BIT(0)

/* SDMA request flag bits */
#define SDMA_REQ_HAVE_AHG 1
#define SDMA_REQ_HAS_ERROR 2
/*
 * Maximum retry attempts to submit a TX request
 * before putting the process to sleep.
 */
#define MAX_DEFER_RETRY_COUNT 1
static unsigned initial_pkt_count = 8;

#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
struct sdma_mmu_node;

struct user_sdma_iovec {
	struct list_head list;
	/* number of pages in this vector */
	/* array of pinned pages for this vector */
	/*
	 * offset into the virtual address space of the vector at
	 * which we last left off.
	 */
	struct sdma_mmu_node *node;
struct sdma_mmu_node {
	struct mmu_rb_node rb;
	struct hfi1_user_sdma_pkt_q *pq;

/* evict operation argument */
	u32 cleared;	/* count evicted so far */
	u32 target;	/* target count to evict */
struct user_sdma_request {
	struct sdma_req_info info;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	/* This is the original header from user space */
	struct hfi1_pkt_header hdr;
	/*
	 * Pointer to the SDMA engine for this request.
	 * Since different requests could be on different VLs,
	 * each request will need its own engine pointer.
	 */
	struct sdma_engine *sde;
	/*
	 * KDETH.Offset (Eager) field
	 * We need to remember the initial value so the headers
	 * can be updated properly.
	 */
	/*
	 * KDETH.OFFSET (TID) field
	 * The offset can cover multiple packets, depending on the
	 * size of the TID entry.
	 */
	/*
	 * Remember this because the header template always sets it
	 */
	/*
	 * We copy the iovs for this request (based on
	 * info.iovcnt). These are only the data vectors
	 */
	/* total length of the data in the request */
	/* progress index moving along the iovs array */
	struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
	/* number of elements copied to the tids array */
	/* TID array values copied from the tid_iov vector */
	struct list_head txps;
/*
 * A single txreq could span up to 3 physical pages when the MTU
 * is sufficiently large (> 4K). Each of the IOV pointers also
 * needs its own set of flags so that the vectors can be handled
 * independently of each other.
 */
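/*
 * For illustration of the comment above: with 4KB pages and an 8KB MTU,
 * a fragment that starts part-way into a page covers the tail of that
 * page, one full page and the head of a third page - three physical
 * pages for a single txreq.
 */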
struct user_sdma_txreq {
	/* Packet header for the txreq */
	struct hfi1_pkt_header hdr;
	struct sdma_txreq txreq;
	struct list_head list;
	struct user_sdma_request *req;
#define SDMA_DBG(req, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
		  (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
		  ##__VA_ARGS__)
#define SDMA_Q_DBG(pq, fmt, ...) \
	hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
		  (pq)->subctxt, ##__VA_ARGS__)
static int user_sdma_send_pkts(struct user_sdma_request *, unsigned);
static int num_user_pages(const struct iovec *);
static void user_sdma_txreq_cb(struct sdma_txreq *, int);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
			    struct user_sdma_iovec *);
static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
static int check_header_template(struct user_sdma_request *,
				 struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
			    struct user_sdma_txreq *, u32);
static int set_txreq_header_ahg(struct user_sdma_request *,
				struct user_sdma_txreq *, u32);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *,
				  struct hfi1_user_sdma_comp_q *,
				  u16, enum hfi1_sdma_comp_state, int);
static inline u32 set_pkt_bth_psn(__be32, u8, u32);
static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);

static int defer_packet_queue(
	struct sdma_engine *,
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(void *, struct mmu_rb_node *);
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *arg2, bool *stop);
static void sdma_rb_remove(void *, struct mmu_rb_node *);
static int sdma_rb_invalidate(void *, struct mmu_rb_node *);
static struct mmu_rb_ops sdma_rb_ops = {
	.filter = sdma_rb_filter,
	.insert = sdma_rb_insert,
	.evict = sdma_rb_evict,
	.remove = sdma_rb_remove,
	.invalidate = sdma_rb_invalidate
};
static int defer_packet_queue(
	struct sdma_engine *sde,
	struct sdma_txreq *txreq,
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
	struct hfi1_ibdev *dev = &pq->dd->verbs_dev;

	write_seqlock(&dev->iowait_lock);
	if (sdma_progress(sde, seq, txreq))
	/*
	 * We are assuming that if the list is enqueued somewhere, it
	 * is on the dmawait list since that is the only place where
	 * it is supposed to be enqueued.
	 */
	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
	if (list_empty(&pq->busy.list))
		list_add_tail(&pq->busy.list, &sde->dmawait);
	write_sequnlock(&dev->iowait_lock);

	write_sequnlock(&dev->iowait_lock);
static void activate_packet_queue(struct iowait *wait, int reason)
	struct hfi1_user_sdma_pkt_q *pq =
		container_of(wait, struct hfi1_user_sdma_pkt_q, busy);

	xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
	wake_up(&wait->wait_dma);
static void sdma_kmem_cache_ctor(void *obj)
	struct user_sdma_txreq *tx = obj;

	memset(tx, 0, sizeof(*tx));
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, struct file *fp)
	struct hfi1_filedata *fd;
	struct hfi1_devdata *dd;
	struct hfi1_user_sdma_comp_q *cq;
	struct hfi1_user_sdma_pkt_q *pq;

	fd = fp->private_data;

	if (!hfi1_sdma_comp_ring_size) {

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);

	memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
	pq->reqs = kzalloc(memsize, GFP_KERNEL);

	memsize = BITS_TO_LONGS(hfi1_sdma_comp_ring_size) * sizeof(long);
	pq->req_in_use = kzalloc(memsize, GFP_KERNEL);
		goto pq_reqs_no_in_use;

	INIT_LIST_HEAD(&pq->list);
	pq->ctxt = uctxt->ctxt;
	pq->subctxt = fd->subctxt;
	pq->n_max_reqs = hfi1_sdma_comp_ring_size;
	atomic_set(&pq->n_reqs, 0);
	init_waitqueue_head(&pq->wait);
	atomic_set(&pq->n_locked, 0);

	iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
		    activate_packet_queue, NULL);

	snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
	pq->txreq_cache = kmem_cache_create(buf,
					    sizeof(struct user_sdma_txreq),
					    sdma_kmem_cache_ctor);
	if (!pq->txreq_cache) {
		dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);

	memsize = PAGE_ALIGN(sizeof(*cq->comps) * hfi1_sdma_comp_ring_size);
	cq->comps = vmalloc_user(memsize);

	cq->nentries = hfi1_sdma_comp_ring_size;

	ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
		dd_dev_err(dd, "Failed to register with MMU %d", ret);

	spin_lock_irqsave(&uctxt->sdma_qlock, flags);
	list_add(&pq->list, &uctxt->sdma_queues);
	spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);

	kmem_cache_destroy(pq->txreq_cache);
	kfree(pq->req_in_use);
int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq;

	hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
		  uctxt->ctxt, fd->subctxt);

		hfi1_mmu_rb_unregister(pq->handler);
		spin_lock_irqsave(&uctxt->sdma_qlock, flags);
		if (!list_empty(&pq->list))
			list_del_init(&pq->list);
		spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
		iowait_sdma_drain(&pq->busy);
		/* Wait until all requests have been freed. */
		wait_event_interruptible(
			!atomic_read(&pq->n_reqs));
		kfree(pq->req_in_use);
		kmem_cache_destroy(pq->txreq_cache);
		vfree(fd->cq->comps);
static u8 dlid_to_selector(u16 dlid)
	static u8 mapping[256];
	static int initialized;

		memset(mapping, 0xFF, 256);

	hash = ((dlid >> 8) ^ dlid) & 0xFF;
	if (mapping[hash] == 0xFF) {
		mapping[hash] = next;
		next = (next + 1) & 0x7F;

	return mapping[hash];
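/*
 * Illustrative walk-through of the hash above: for dlid 0x1234 the bucket
 * is (0x12 ^ 0x34) & 0xFF = 0x26; the first request to that bucket is
 * assigned the next free selector value, and later requests to the same
 * hashed dlid simply reuse it.
 */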
/**
 * hfi1_user_sdma_process_request() - Process and start a user sdma request
 * @fp: valid file pointer
 * @iovec: array of io vectors to process
 * @dim: overall iovec array size
 * @count: number of io vector array entries processed
 */
int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
				   unsigned long dim, unsigned long *count)
	struct hfi1_filedata *fd = fp->private_data;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_user_sdma_pkt_q *pq = fd->pq;
	struct hfi1_user_sdma_comp_q *cq = fd->cq;
	struct hfi1_devdata *dd = pq->dd;
	unsigned long idx = 0;
	u8 pcount = initial_pkt_count;
	struct sdma_req_info info;
	struct user_sdma_request *req;

	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
			  "[%u:%u:%u] First vector not big enough for header %lu/%lu",
			  dd->unit, uctxt->ctxt, fd->subctxt,
			  iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));

	ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
		hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
			  dd->unit, uctxt->ctxt, fd->subctxt, ret);

	trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,

	if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
			  "[%u:%u:%u:%u] Invalid comp index",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
	/*
	 * Sanity check the header io vector count. Need at least 1 vector
	 * (header) and cannot be larger than the actual io vector count.
	 */
	if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
			  "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
			  req_iovcnt(info.ctrl), dim);

	if (!info.fragsize) {
			  "[%u:%u:%u:%u] Request does not specify fragsize",
			  dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);

	/* Try to claim the request. */
	if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
		hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
			  dd->unit, uctxt->ctxt, fd->subctxt,
	/*
	 * All safety checks have been done and this request has been claimed.
	 */
	hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
		  uctxt->ctxt, fd->subctxt, info.comp_idx);
	req = pq->reqs + info.comp_idx;
	memset(req, 0, sizeof(*req));
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */

	INIT_LIST_HEAD(&req->txps);

	memcpy(&req->info, &info, sizeof(info));

	/* The request is initialized, count it */
	atomic_inc(&pq->n_reqs);

	if (req_opcode(info.ctrl) == EXPECTED) {
		/* Expected requests must have TID info and at least one data vector */
		if (req->data_iovs < 2) {
				 "Not enough vectors for expected request");

	if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
		SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
			 MAX_VECTORS_PER_REQ);

	/* Copy the header from the user buffer */
	ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
		SDMA_DBG(req, "Failed to copy header template (%d)", ret);

	/* If Static rate control is not enabled, sanitize the header. */
	if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))

	/* Validate the opcode. Do not trust packets from user space blindly. */
	opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
	if ((opcode & USER_OPCODE_CHECK_MASK) !=
	     USER_OPCODE_CHECK_VAL) {
		SDMA_DBG(req, "Invalid opcode (%d)", opcode);
	/*
	 * Validate the vl. Do not trust packets from user space blindly.
	 * VL comes from PBC, SC comes from LRH, and the VL needs to
	 * match the SC lookup.
	 */
	vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
	sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
	      (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
	if (vl >= dd->pport->vls_operational ||
	    vl != sc_to_vlt(dd, sc)) {
		SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);

	/* Checking P_KEY for requests from user-space */
	if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
			      PKEY_CHECK_INVALID)) {
	/*
	 * We should also check BTH.lnh. If it says the next header is a GRH,
	 * then the RXE parsing will be off and will land in the middle of
	 * the KDETH or miss it entirely.
	 */
	if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
		SDMA_DBG(req, "User tried to pass in a GRH");

	req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
	/*
	 * Calculate the initial TID offset based on the values of
	 * KDETH.OFFSET and KDETH.OM that are passed in.
	 */
	req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
			 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			  KDETH_OM_LARGE : KDETH_OM_SMALL);
	SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);

	/* Save all the IO vector structures */
	for (i = 0; i < req->data_iovs; i++) {
		INIT_LIST_HEAD(&req->iovs[i].list);
		memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec));
		ret = pin_vector_pages(req, &req->iovs[i]);
		req->data_len += req->iovs[i].iov.iov_len;
	SDMA_DBG(req, "total data length %u", req->data_len);

	if (pcount > req->info.npkts)
		pcount = req->info.npkts;
	/*
	 * User space will provide the TID info only when the
	 * request type is EXPECTED. This is true even if there is
	 * only one packet in the request and the header is already
	 * set up. The reason for the singular TID case is that the
	 * driver needs to perform safety checks.
	 */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);

		if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {

		req->tids = kcalloc(ntids, sizeof(*req->tids), GFP_KERNEL);

		/*
		 * We have to copy all of the tids because they may vary
		 * in size and, therefore, the TID count might not be
		 * equal to the pkt count. However, there is no way to
		 * tell at this point.
		 */
		ret = copy_from_user(req->tids, iovec[idx].iov_base,
				     ntids * sizeof(*req->tids));
			SDMA_DBG(req, "Failed to copy %d TIDs (%d)",

	dlid = be16_to_cpu(req->hdr.lrh[1]);
	selector = dlid_to_selector(dlid);
	selector += uctxt->ctxt + fd->subctxt;
	req->sde = sdma_select_user_engine(dd, selector, vl);

	if (!req->sde || !sdma_running(req->sde)) {

	/* We don't need an AHG entry if the request contains only one packet */
	if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG)) {
		int ahg = sdma_ahg_alloc(req->sde);

		if (likely(ahg >= 0)) {
			req->ahg_idx = (u8)ahg;
			set_bit(SDMA_REQ_HAVE_AHG, &req->flags);

	set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
	pq->state = SDMA_PKT_Q_ACTIVE;
	/* Send the first N packets in the request to buy us some time */
	ret = user_sdma_send_pkts(req, pcount);
	if (unlikely(ret < 0 && ret != -EBUSY))
	/*
	 * This is a somewhat blocking send implementation.
	 * The driver will block the caller until all packets of the
	 * request have been submitted to the SDMA engine. However, it
	 * will not wait for send completions.
	 */
	while (req->seqsubmitted != req->info.npkts) {
		ret = user_sdma_send_pkts(req, pcount);
			wait_event_interruptible_timeout(
				(pq->state == SDMA_PKT_Q_ACTIVE),
					SDMA_IOWAIT_TIMEOUT));
	/*
	 * If seqsubmitted == npkts, the completion routine controls the
	 * final state. If seqsubmitted < npkts, wait for any outstanding
	 * packets to finish before cleaning up.
	 */
	if (req->seqsubmitted < req->info.npkts) {
		if (req->seqsubmitted)
			wait_event(pq->busy.wait_dma,
				   (req->seqcomp == req->seqsubmitted - 1));
		user_sdma_free_request(req, true);
	set_comp_state(pq, cq, info.comp_idx, ERROR, ret);

static inline u32 compute_data_length(struct user_sdma_request *req,
				      struct user_sdma_txreq *tx)
	/*
	 * Determine the proper size of the packet data.
	 * The size of the data of the first packet is in the header
	 * template. However, it includes the header and ICRC, which need
	 * to be subtracted.
	 * The minimum representable packet data length in a header is 4 bytes,
	 * therefore, when the data length request is less than 4 bytes, there's
	 * only one packet, and the packet data length is equal to that of the
	 * request data length.
	 * The size of the remaining packets is the minimum of the frag
	 * size (MTU) or the remaining data in the request.
	 */
	if (req->data_len < sizeof(u32))
		len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
		       (sizeof(tx->hdr) - 4));
	} else if (req_opcode(req->info.ctrl) == EXPECTED) {
		u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
		/*
		 * Get the data length based on the remaining space in the
		 * TID pair.
		 */
		len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
		/* If we've filled up the TID pair, move to the next one. */
		if (unlikely(!len) && ++req->tididx < req->n_tids &&
		    req->tids[req->tididx]) {
			tidlen = EXP_TID_GET(req->tids[req->tididx],
			len = min_t(u32, tidlen, req->info.fragsize);
		/*
		 * Since the TID pairs map entire pages, make sure that we
		 * are not going to try to send more data than we have
		 * remaining.
		 */
		len = min(len, req->data_len - req->sent);
		len = min(req->data_len - req->sent, (u32)req->info.fragsize);
	SDMA_DBG(req, "Data Length = %u", len);

static inline u32 pad_len(u32 len)
	if (len & (sizeof(u32) - 1))
		len += sizeof(u32) - (len & (sizeof(u32) - 1));

static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
	/* (Size of complete header - size of PBC) + 4B ICRC + data length */
	return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
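/*
 * Quick worked example of the two helpers above (illustration only):
 * pad_len(97) rounds the payload up to 100 bytes so the packet stays
 * dword aligned, and get_lrh_len(hdr, 100) then adds the header size
 * (minus the PBC, which the LRH does not count) plus the 4 byte ICRC to
 * that padded payload to produce the on-the-wire LRH length.
 */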
static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
	struct user_sdma_txreq *tx = NULL;
	struct hfi1_user_sdma_pkt_q *pq = NULL;
	struct user_sdma_iovec *iovec = NULL;

	/* If tx completion has reported an error, we are done. */
	if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags))

	/*
	 * Check if we might have sent the entire request already
	 */
	if (unlikely(req->seqnum == req->info.npkts)) {
		if (!list_empty(&req->txps))

	if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
		maxpkts = req->info.npkts - req->seqnum;

	while (npkts < maxpkts) {
		u32 datalen = 0, queued = 0, data_sent = 0;

		/*
		 * Check whether any of the completions have come back
		 * with errors. If so, we are not going to process any
		 * more packets from this request.
		 */
		if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags))

		tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);

		INIT_LIST_HEAD(&tx->list);

		if (req->seqnum == req->info.npkts - 1)
			tx->flags |= TXREQ_FLAGS_REQ_LAST_PKT;
		/*
		 * Calculate the payload size - this is min of the fragment
		 * (MTU) size or the remaining bytes in the request but only
		 * if we have payload data.
		 */
			iovec = &req->iovs[req->iov_idx];
			if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
				if (++req->iov_idx == req->data_iovs) {
				iovec = &req->iovs[req->iov_idx];
				WARN_ON(iovec->offset);

			datalen = compute_data_length(req, tx);
					 "Request has data but pkt len is 0");

		if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags)) {
			u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
			u32 lrhlen = get_lrh_len(req->hdr,
				/*
				 * Copy the request header into the tx header
				 * because the HW needs a cacheline-aligned
				 * address.
				 * This copy can be optimized out if the hdr
				 * member of user_sdma_request were also
				 * cacheline aligned.
				 */
				memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
				if (PBC2LRH(pbclen) != lrhlen) {
					pbclen = (pbclen & 0xf000) |
					tx->hdr.pbc[0] = cpu_to_le16(pbclen);
				ret = sdma_txinit_ahg(&tx->txreq,
						      SDMA_TXREQ_F_AHG_COPY,
						      sizeof(tx->hdr) + datalen,
						      req->ahg_idx, 0, NULL, 0,
				ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
				changes = set_txreq_header_ahg(req, tx,
				sdma_txinit_ahg(&tx->txreq,
						SDMA_TXREQ_F_USE_AHG,
						datalen, req->ahg_idx, changes,
						req->ahg, sizeof(req->hdr),
			ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
					  datalen, user_sdma_txreq_cb);
			/*
			 * Modify the header for this packet. This only needs
			 * to be done if we are not going to use AHG. Otherwise,
			 * the HW will do it based on the changes we gave it
			 * during sdma_txinit_ahg().
			 */
			ret = set_txreq_header(req, tx, datalen);

		/*
		 * If the request contains any data vectors, add up to
		 * fragsize bytes to the descriptor.
		 */
		while (queued < datalen &&
		       (req->sent + data_sent) < req->data_len) {
			unsigned long base, offset;
			unsigned pageidx, len;

			base = (unsigned long)iovec->iov.iov_base;
			offset = offset_in_page(base + iovec->offset +
			pageidx = (((iovec->offset + iov_offset +
				     base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
			len = offset + req->info.fragsize > PAGE_SIZE ?
			      PAGE_SIZE - offset : req->info.fragsize;
			len = min((datalen - queued), len);
			ret = sdma_txadd_page(pq->dd, &tx->txreq,
					      iovec->pages[pageidx],
				SDMA_DBG(req, "SDMA txreq add page failed %d\n",

			if (unlikely(queued < datalen &&
				     pageidx == iovec->npages &&
				     req->iov_idx < req->data_iovs - 1)) {
				iovec->offset += iov_offset;
				iovec = &req->iovs[++req->iov_idx];
		/*
		 * The txreq was submitted successfully so we can update
		 * the request counters.
		 */
		req->koffset += datalen;
		if (req_opcode(req->info.ctrl) == EXPECTED)
			req->tidoffset += datalen;
		req->sent += data_sent;
			iovec->offset += iov_offset;
		list_add_tail(&tx->txreq.list, &req->txps);
		/*
		 * It is important to increment this here as it is used to
		 * generate the BTH.PSN and, therefore, can't be bulk-updated
		 * outside of the loop.
		 */
		tx->seqnum = req->seqnum++;

	ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
	req->seqsubmitted += count;
	if (req->seqsubmitted == req->info.npkts) {
		/*
		 * The txreq has already been submitted to the HW queue
		 * so we can free the AHG entry now. Corruption will not
		 * happen due to the sequential manner in which
		 * descriptors are processed.
		 */
		if (test_bit(SDMA_REQ_HAVE_AHG, &req->flags))
			sdma_ahg_free(req->sde, req->ahg_idx);

	sdma_txclean(pq->dd, &tx->txreq);
	kmem_cache_free(pq->txreq_cache, tx);
/*
 * How many pages in this iovec element?
 */
static inline int num_user_pages(const struct iovec *iov)
	const unsigned long addr = (unsigned long)iov->iov_base;
	const unsigned long len = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
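/*
 * Illustration of the calculation above: for a vector with iov_base
 * 0x2800 and iov_len 0x2000 (4KB pages), spage = 0x2000 and
 * epage = 0x4000, so the vector touches 1 + ((0x4000 - 0x2000) >> 12) = 3
 * pages even though its length is only two pages worth of bytes.
 */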
static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
	struct evict_data evict_data;

	evict_data.cleared = 0;
	evict_data.target = npages;
	hfi1_mmu_rb_evict(pq->handler, &evict_data);
	return evict_data.cleared;
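/*
 * A short sketch of how the eviction path is used by pin_vector_pages()
 * below: if pinning, say, 16 more pages would exceed the pinning limit,
 * sdma_cache_evict(pq, 16) walks the rb tree via sdma_rb_evict(), skipping
 * nodes that are still referenced and removing idle ones until
 * evict_data.cleared reaches the 16 page target (or the tree is
 * exhausted); the caller only proceeds if enough pages were cleared.
 */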
static int pin_vector_pages(struct user_sdma_request *req,
			    struct user_sdma_iovec *iovec)
	int ret = 0, pinned, npages, cleared;
	struct page **pages;
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct sdma_mmu_node *node = NULL;
	struct mmu_rb_node *rb_node;

	rb_node = hfi1_mmu_rb_extract(pq->handler,
				      (unsigned long)iovec->iov.iov_base,
				      iovec->iov.iov_len);
		node = container_of(rb_node, struct sdma_mmu_node, rb);

		node = kzalloc(sizeof(*node), GFP_KERNEL);

		node->rb.addr = (unsigned long)iovec->iov.iov_base;
		atomic_set(&node->refcount, 0);

	npages = num_user_pages(&iovec->iov);
	if (node->npages < npages) {
		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
			SDMA_DBG(req, "Failed page array alloc");

		memcpy(pages, node->pages, node->npages * sizeof(*pages));

		npages -= node->npages;

		if (!hfi1_can_pin_pages(pq->dd, pq->mm,
					atomic_read(&pq->n_locked), npages)) {
			cleared = sdma_cache_evict(pq, npages);
			if (cleared >= npages)

		pinned = hfi1_acquire_user_pages(pq->mm,
			((unsigned long)iovec->iov.iov_base +
			 (node->npages * PAGE_SIZE)), npages, 0,
			pages + node->npages);

		if (pinned != npages) {
			unpin_vector_pages(pq->mm, pages, node->npages,

		node->rb.len = iovec->iov.iov_len;
		node->pages = pages;
		node->npages += pinned;
		npages = node->npages;
		atomic_add(pinned, &pq->n_locked);
	iovec->pages = node->pages;
	iovec->npages = npages;

	ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
		atomic_sub(node->npages, &pq->n_locked);

	unpin_vector_pages(pq->mm, node->pages, 0, node->npages);

static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
			       unsigned start, unsigned npages)
	hfi1_release_user_pages(mm, pages + start, npages, false);
static int check_header_template(struct user_sdma_request *req,
				 struct hfi1_pkt_header *hdr, u32 lrhlen,
	/*
	 * Perform safety checks for any type of packet:
	 * - transfer size is a multiple of 64 bytes
	 * - packet length is a multiple of 4 bytes
	 * - packet length is not larger than MTU size
	 *
	 * These checks are only done for the first packet of the
	 * transfer since the header is "given" to us by user space.
	 * For the remainder of the packets we compute the values.
	 */
	if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
	    lrhlen > get_lrh_len(*hdr, req->info.fragsize))

	if (req_opcode(req->info.ctrl) == EXPECTED) {
		/*
		 * The header is checked only on the first packet. Furthermore,
		 * we ensure that at least one TID entry is copied when the
		 * request is submitted. Therefore, we don't have to verify that
		 * tididx points to something sane.
		 */
		u32 tidval = req->tids[req->tididx],
			tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
			tididx = EXP_TID_GET(tidval, IDX),
			tidctrl = EXP_TID_GET(tidval, CTRL),
		__le32 kval = hdr->kdeth.ver_tid_offset;

		tidoff = KDETH_GET(kval, OFFSET) *
			 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
			  KDETH_OM_LARGE : KDETH_OM_SMALL);
		/*
		 * Expected receive packets have the following
		 * additional checks:
		 * - offset is not larger than the TID size
		 * - TIDCtrl values match between header and TID array
		 * - TID indexes match between header and TID array
		 */
		if ((tidoff + datalen > tidlen) ||
		    KDETH_GET(kval, TIDCTRL) != tidctrl ||
		    KDETH_GET(kval, TID) != tididx)
/*
 * Correctly set the BTH.PSN field based on type of
 * transfer - eager packets can just increment the PSN but
 * expected packets encode generation and sequence in the
 * BTH.PSN field so just incrementing will result in errors.
 */
static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
	u32 val = be32_to_cpu(bthpsn),
		mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :

		psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
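/*
 * Worked example for the expected-receive case handled above (illustrative):
 * with a 24-bit PSN of 0x123456 and frags == 2, only the low 11
 * BTH_SEQ_MASK bits advance: (0x123456 + 2) & 0x7ff = 0x458, giving
 * 0x123458, while the generation bits above the sequence field are left
 * untouched.
 */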
static int set_txreq_header(struct user_sdma_request *req,
			    struct user_sdma_txreq *tx, u32 datalen)
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &tx->hdr;
	u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));

	/* Copy the header template to the request before modification */
	memcpy(hdr, &req->hdr, sizeof(*hdr));

	/*
	 * Check if the PBC and LRH length are mismatched. If so
	 * adjust both in the header.
	 */
	pbclen = le16_to_cpu(hdr->pbc[0]);
	if (PBC2LRH(pbclen) != lrhlen) {
		pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
		hdr->pbc[0] = cpu_to_le16(pbclen);
		hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
		/*
		 * This is the first packet in the sequence that has
		 * a "static" size that can be used for the rest of
		 * the packets (besides the last one).
		 */
		if (unlikely(req->seqnum == 2)) {
			/*
			 * From this point on the lengths in both the
			 * PBC and LRH are the same until the last
			 * packet.
			 * Adjust the template so we don't have to update
			 * every packet.
			 */
			req->hdr.pbc[0] = hdr->pbc[0];
			req->hdr.lrh[2] = hdr->lrh[2];

	/*
	 * We only have to modify the header if this is not the
	 * first packet in the request. Otherwise, we use the
	 * header given to us.
	 */
	if (unlikely(!req->seqnum)) {
		ret = check_header_template(req, hdr, lrhlen, datalen);

	hdr->bth[2] = cpu_to_be32(
		set_pkt_bth_psn(hdr->bth[2],
				(req_opcode(req->info.ctrl) == EXPECTED),

	/* Set ACK request on last packet */
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
		hdr->bth[2] |= cpu_to_be32(1UL << 31);

	/* Set the new offset */
	hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
	/* Expected packets have to fill in the new TID information */
	if (req_opcode(req->info.ctrl) == EXPECTED) {
		tidval = req->tids[req->tididx];
		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
			/*
			 * Since we don't copy all of the TIDs all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
			tidval = req->tids[req->tididx];

		req->omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
			KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE : KDETH_OM_SMALL;
		/* Set KDETH.TIDCtrl based on value for this TID. */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
			  EXP_TID_GET(tidval, CTRL));
		/* Set KDETH.TID based on value for this TID */
		KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
			  EXP_TID_GET(tidval, IDX));
		/* Clear KDETH.SH only on the last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
			KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
		/*
		 * Set the KDETH.OFFSET and KDETH.OM based on the size of
		 * the TID entry.
		 */
1373 SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
1374 req->tidoffset, req->tidoffset / req->omfactor,
1375 req->omfactor != KDETH_OM_SMALL);
1376 KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
1377 req->tidoffset / req->omfactor);
1378 KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
1379 req->omfactor != KDETH_OM_SMALL);
1382 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1383 req->info.comp_idx, hdr, tidval);
1384 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
static int set_txreq_header_ahg(struct user_sdma_request *req,
				struct user_sdma_txreq *tx, u32 len)
	struct hfi1_user_sdma_pkt_q *pq = req->pq;
	struct hfi1_pkt_header *hdr = &req->hdr;
	u16 pbclen = le16_to_cpu(hdr->pbc[0]);
	u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(len));

	if (PBC2LRH(pbclen) != lrhlen) {
		/* PBC.PbcLengthDWs */
		AHG_HEADER_SET(req->ahg, diff, 0, 0, 12,
			       cpu_to_le16(LRH2PBC(lrhlen)));
		/* LRH.PktLen (we need the full 16 bits due to byte swap) */
		AHG_HEADER_SET(req->ahg, diff, 3, 0, 16,
			       cpu_to_be16(lrhlen >> 2));

	/*
	 * Do the common updates
	 */
	/* BTH.PSN and BTH.A */
	val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
		(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
	if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT))
	AHG_HEADER_SET(req->ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
	AHG_HEADER_SET(req->ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));

	AHG_HEADER_SET(req->ahg, diff, 15, 0, 16,
		       cpu_to_le16(req->koffset & 0xffff));
	AHG_HEADER_SET(req->ahg, diff, 15, 16, 16,
		       cpu_to_le16(req->koffset >> 16));
	if (req_opcode(req->info.ctrl) == EXPECTED) {

		tidval = req->tids[req->tididx];

		/*
		 * If the offset puts us at the end of the current TID,
		 * advance everything.
		 */
		if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
			/*
			 * Since we don't copy all of the TIDs all at once,
			 * we have to check again.
			 */
			if (++req->tididx > req->n_tids - 1 ||
			    !req->tids[req->tididx]) {
			tidval = req->tids[req->tididx];

		req->omfactor = ((EXP_TID_GET(tidval, LEN) *
				 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE :
		/* KDETH.OM and KDETH.OFFSET (TID) */
		AHG_HEADER_SET(req->ahg, diff, 7, 0, 16,
			       ((!!(req->omfactor - KDETH_OM_SMALL)) << 15 |
				((req->tidoffset / req->omfactor) & 0x7fff)));
		/* KDETH.TIDCtrl, KDETH.TID */
		val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
				  (EXP_TID_GET(tidval, IDX) & 0x3ff));
		/* Clear KDETH.SH on last packet */
		if (unlikely(tx->flags & TXREQ_FLAGS_REQ_LAST_PKT)) {
			val |= cpu_to_le16(KDETH_GET(hdr->kdeth.ver_tid_offset,
					   AHG_KDETH_INTR_SHIFT);
			val &= cpu_to_le16(~(1U << 13));
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 14, val);
			AHG_HEADER_SET(req->ahg, diff, 7, 16, 12, val);

	trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
					req->info.comp_idx, req->sde->this_idx,
					req->ahg_idx, req->ahg, diff, tidval);
/**
 * user_sdma_txreq_cb() - SDMA tx request completion callback.
 * @txreq: valid sdma tx request
 * @status: success/failure of request
 *
 * Called when the SDMA progress state machine gets notification that
 * the SDMA descriptors for this tx request have been processed by the
 * DMA engine. Called in interrupt context.
 * Only do work on completed sequences.
 */
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
	struct user_sdma_txreq *tx =
		container_of(txreq, struct user_sdma_txreq, txreq);
	struct user_sdma_request *req;
	struct hfi1_user_sdma_pkt_q *pq;
	struct hfi1_user_sdma_comp_q *cq;
	enum hfi1_sdma_comp_state state = COMPLETE;

	if (status != SDMA_TXREQ_S_OK) {
		SDMA_DBG(req, "SDMA completion with error %d",
		set_bit(SDMA_REQ_HAS_ERROR, &req->flags);

	req->seqcomp = tx->seqnum;
	kmem_cache_free(pq->txreq_cache, tx);

	/* sequence isn't complete? We are done */
	if (req->seqcomp != req->info.npkts - 1)

	user_sdma_free_request(req, false);
	set_comp_state(pq, cq, req->info.comp_idx, state, status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
	if (atomic_dec_and_test(&pq->n_reqs))

static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
	if (!list_empty(&req->txps)) {
		struct sdma_txreq *t, *p;

		list_for_each_entry_safe(t, p, &req->txps, list) {
			struct user_sdma_txreq *tx =
				container_of(t, struct user_sdma_txreq, txreq);
			list_del_init(&t->list);
			sdma_txclean(req->pq->dd, t);
			kmem_cache_free(req->pq->txreq_cache, tx);

	if (req->data_iovs) {
		struct sdma_mmu_node *node;

		for (i = 0; i < req->data_iovs; i++) {
			node = req->iovs[i].node;

			req->iovs[i].node = NULL;

				hfi1_mmu_rb_remove(req->pq->handler,
				atomic_dec(&node->refcount);

	clear_bit(req->info.comp_idx, req->pq->req_in_use);
static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
				  struct hfi1_user_sdma_comp_q *cq,
				  u16 idx, enum hfi1_sdma_comp_state state,
	hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
		  pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
	cq->comps[idx].status = state;
		cq->comps[idx].errcode = -ret;
	trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
	return (bool)(node->addr == addr);

static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_inc(&node->refcount);

/*
 * Return 1 to remove the node from the rb tree and call the remove op.
 *
 * Called with the rb tree lock held.
 */
static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
			 void *evict_arg, bool *stop)
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
	struct evict_data *evict_data = evict_arg;

	/* is this node still being used? */
	if (atomic_read(&node->refcount))
		return 0; /* keep this node */

	/* this node will be evicted, add its pages to our count */
	evict_data->cleared += node->npages;

	/* have enough pages been cleared? */
	if (evict_data->cleared >= evict_data->target)

	return 1; /* remove this node */

static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	atomic_sub(node->npages, &node->pq->n_locked);

	unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);

static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);

	if (!atomic_read(&node->refcount))