1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright(c) 2016 - 2020 Intel Corporation.
6 #include <linux/hash.h>
7 #include <linux/bitops.h>
8 #include <linux/lockdep.h>
9 #include <linux/vmalloc.h>
10 #include <linux/slab.h>
11 #include <rdma/ib_verbs.h>
12 #include <rdma/ib_hdrs.h>
13 #include <rdma/opa_addr.h>
14 #include <rdma/uverbs_ioctl.h>
19 #define RVT_RWQ_COUNT_THRESHOLD 16
21 static void rvt_rc_timeout(struct timer_list *t);
22 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
23 enum ib_qp_type type);
26 * Convert the AETH RNR timeout code into the number of microseconds.
28 static const u32 ib_rvt_rnr_table[32] = {
29 655360, /* 00: 655.36 */
49 10240, /* 14: 10.24 */
50 15360, /* 15: 15.36 */
51 20480, /* 16: 20.48 */
52 30720, /* 17: 30.72 */
53 40960, /* 18: 40.96 */
54 61440, /* 19: 61.44 */
55 81920, /* 1A: 81.92 */
56 122880, /* 1B: 122.88 */
57 163840, /* 1C: 163.84 */
58 245760, /* 1D: 245.76 */
59 327680, /* 1E: 327.68 */
60 491520 /* 1F: 491.52 */
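/*
 * The index is the 5-bit RNR NAK timer code carried in the AETH
 * credit field (see rvt_aeth_to_usec() below); values are in
 * microseconds. Note the IBTA encoding is not monotonic: code 0x00
 * maps to the largest wait (655.36 ms), not the smallest.
 */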
64 * Note that it is OK to post send work requests in the SQE and ERR
65 * states; rvt_do_send() will process them and generate error
66 * completions as per IB 1.2 C10-96.
68 const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
70 [IB_QPS_INIT] = RVT_POST_RECV_OK,
71 [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
72 [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
73 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
74 RVT_PROCESS_NEXT_SEND_OK,
75 [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
76 RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
77 [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
78 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
79 [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
80 RVT_POST_SEND_OK | RVT_FLUSH_SEND,
82 EXPORT_SYMBOL(ib_rvt_state_ops);
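/*
 * Typical use is to gate work on the current QP state, e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 *
 * as rvt_post_send() does below before touching the send queue.
 */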
84 /* platform specific: return the last level cache (llc) size, in KiB */
85 static int rvt_wss_llc_size(void)
87 /* assume that the boot CPU value is universal for all CPUs */
88 return boot_cpu_data.x86_cache_size;
91 /* platform specific: cacheless copy */
92 static void cacheless_memcpy(void *dst, void *src, size_t n)
95 * Use the only available X64 cacheless copy. Add a __user cast
96 * to quiet sparse. The src argument is already in the kernel so
97 * there are no security issues. The extra fault recovery machinery
100 __copy_user_nocache(dst, (void __user *)src, n, 0);
103 void rvt_wss_exit(struct rvt_dev_info *rdi)
105 struct rvt_wss *wss = rdi->wss;
110 /* coded to handle partially initialized and repeat callers */
118 * rvt_wss_init - Init wss data structures
120 * Return: 0 on success
122 int rvt_wss_init(struct rvt_dev_info *rdi)
124 unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
125 unsigned int wss_threshold = rdi->dparms.wss_threshold;
126 unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
132 int node = rdi->dparms.node;
134 if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
139 rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
144 /* check for a valid percent range - default to 80 if none or invalid */
145 if (wss_threshold < 1 || wss_threshold > 100)
148 /* reject a wildly large period */
149 if (wss_clean_period > 1000000)
150 wss_clean_period = 256;
152 /* reject a zero period */
153 if (wss_clean_period == 0)
154 wss_clean_period = 1;
157 * Calculate the table size - the next power of 2 larger than the
158 * LLC size. LLC size is in KiB.
160 llc_size = rvt_wss_llc_size() * 1024;
161 table_size = roundup_pow_of_two(llc_size);
163 /* one bit per page in rounded up table */
164 llc_bits = llc_size / PAGE_SIZE;
165 table_bits = table_size / PAGE_SIZE;
166 wss->pages_mask = table_bits - 1;
167 wss->num_entries = table_bits / BITS_PER_LONG;
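/*
 * Worked example, assuming 4 KiB pages: a 25600 KiB LLC gives
 * llc_size = 26214400, table_size = 33554432 (next power of 2),
 * table_bits = 8192 and llc_bits = 6400, so num_entries is 128
 * longs on a 64-bit build and an 80% threshold is 5120 pages.
 */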
169 wss->threshold = (llc_bits * wss_threshold) / 100;
170 if (wss->threshold == 0)
173 wss->clean_period = wss_clean_period;
174 atomic_set(&wss->clean_counter, wss_clean_period);
176 wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
187 * Advance the clean counter. When the clean period has expired,
190 * This is implemented in atomics to avoid locking. Because multiple
191 * variables are involved, it can be racy which can lead to slightly
192 * inaccurate information. Since this is only a heuristic, this is
193 * OK. Any inaccuracies will clean themselves out as the counter
194 * advances. That said, it is unlikely the entry clean operation will
195 * race - the next possible racer will not start until the next clean
198 * The clean counter is implemented as a decrement to zero. When zero
199 * is reached an entry is cleaned.
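 *
 * Example: with clean_period = 256, roughly one table entry is
 * zeroed per 256 insertions, so a 128-entry table (as in the sizing
 * example above) ages out fully over about 32k insertions.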
201 static void wss_advance_clean_counter(struct rvt_wss *wss)
207 /* become the cleaner if we decrement the counter to zero */
208 if (atomic_dec_and_test(&wss->clean_counter)) {
210 * Set, not add, the clean period. This avoids an issue
211 * where the counter could decrement below the clean period.
212 * Doing a set can result in lost decrements, slowing the
213 * clean advance. Since this is a heuristic, this possible
216 * An alternative is to loop, advancing the counter by a
217 * clean period until the result is > 0. However, this could
218 * lead to several threads keeping another in the clean loop.
219 * This could be mitigated by limiting the number of times
220 * we stay in the loop.
222 atomic_set(&wss->clean_counter, wss->clean_period);
225 * Uniquely grab the entry to clean and move to next.
226 * The current entry is always the lower bits of
227 * wss.clean_entry. The table size, wss.num_entries,
228 * is always a power-of-2.
230 entry = (atomic_inc_return(&wss->clean_entry) - 1)
231 & (wss->num_entries - 1);
233 /* clear the entry and count the bits */
234 bits = xchg(&wss->entries[entry], 0);
235 weight = hweight64((u64)bits);
236 /* only adjust the contended total count if needed */
238 atomic_sub(weight, &wss->total_count);
243 * Insert the given address into the working set array.
245 static void wss_insert(struct rvt_wss *wss, void *address)
247 u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
248 u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
249 u32 nr = page & (BITS_PER_LONG - 1);
251 if (!test_and_set_bit(nr, &wss->entries[entry]))
252 atomic_inc(&wss->total_count);
254 wss_advance_clean_counter(wss);
258 * Is the working set larger than the threshold?
260 static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
262 return atomic_read(&wss->total_count) >= wss->threshold;
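/*
 * In RVT_SGE_COPY_ADAPTIVE mode the SGE copy path feeds destination
 * pages into wss_insert() and switches to cacheless_memcpy() once
 * this returns true, i.e. once the hot working set no longer fits
 * in the LLC (see rvt_copy_sge()).
 */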
265 static void get_map_page(struct rvt_qpn_table *qpt,
266 struct rvt_qpn_map *map)
268 unsigned long page = get_zeroed_page(GFP_KERNEL);
271 * Free the page if someone raced with us installing it.
274 spin_lock(&qpt->lock);
278 map->page = (void *)page;
279 spin_unlock(&qpt->lock);
283 * init_qpn_table - initialize the QP number table for a device
284 * @rdi: rvt dev struct
285 * @qpt: the QPN table
287 static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
290 struct rvt_qpn_map *map;
293 if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
296 spin_lock_init(&qpt->lock);
298 qpt->last = rdi->dparms.qpn_start;
299 qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;
302 * Drivers may want some QPs beyond what we need for verbs; let them use
303 * our qpn table. No need for two. Let's go ahead and mark the bitmaps
304 * for those. The reserved range must be *after* the range which verbs
308 /* Figure out number of bit maps needed before reserved range */
309 qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
311 /* This should always be zero */
312 offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
314 /* Starting with the first reserved bit map */
315 map = &qpt->map[qpt->nmaps];
317 rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
318 rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
319 for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
321 get_map_page(qpt, map);
327 set_bit(offset, map->page);
329 if (offset == RVT_BITS_PER_PAGE) {
340 * free_qpn_table - free the QP number table for a device
341 * @qpt: the QPN table
343 static void free_qpn_table(struct rvt_qpn_table *qpt)
347 for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
348 free_page((unsigned long)qpt->map[i].page);
352 * rvt_driver_qp_init - Init driver qp resources
353 * @rdi: rvt dev structure
355 * Return: 0 on success
357 int rvt_driver_qp_init(struct rvt_dev_info *rdi)
362 if (!rdi->dparms.qp_table_size)
366 * If driver is not doing any QP allocation then make sure it is
367 * providing the necessary QP functions.
369 if (!rdi->driver_f.free_all_qps ||
370 !rdi->driver_f.qp_priv_alloc ||
371 !rdi->driver_f.qp_priv_free ||
372 !rdi->driver_f.notify_qp_reset ||
373 !rdi->driver_f.notify_restart_rc)
376 /* allocate parent object */
377 rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
382 /* allocate hash table */
383 rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
384 rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
385 rdi->qp_dev->qp_table =
386 kmalloc_array_node(rdi->qp_dev->qp_table_size,
387 sizeof(*rdi->qp_dev->qp_table),
388 GFP_KERNEL, rdi->dparms.node);
389 if (!rdi->qp_dev->qp_table)
392 for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
393 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);
395 spin_lock_init(&rdi->qp_dev->qpt_lock);
397 /* initialize qpn map */
398 if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
401 spin_lock_init(&rdi->n_qps_lock);
406 kfree(rdi->qp_dev->qp_table);
407 free_qpn_table(&rdi->qp_dev->qpn_table);
416 * rvt_free_qp_cb - callback function to reset a qp
417 * @qp: the qp to reset
420 * This function resets the qp and removes it from the
423 static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
425 unsigned int *qp_inuse = (unsigned int *)v;
426 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
428 /* Reset the qp and remove it from the qp hash list */
429 rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
431 /* Increment the qp_inuse count */
436 * rvt_free_all_qps - check for QPs still in use
437 * @rdi: rvt device info structure
439 * There should not be any QPs still in use.
440 * Free memory for table.
441 * Return the number of QPs still in use.
443 static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
445 unsigned int qp_inuse = 0;
447 qp_inuse += rvt_mcast_tree_empty(rdi);
449 rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
455 * rvt_qp_exit - clean up qps on device exit
456 * @rdi: rvt dev structure
458 * Check for qp leaks and free resources.
460 void rvt_qp_exit(struct rvt_dev_info *rdi)
462 u32 qps_inuse = rvt_free_all_qps(rdi);
465 rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
470 kfree(rdi->qp_dev->qp_table);
471 free_qpn_table(&rdi->qp_dev->qpn_table);
475 static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
476 struct rvt_qpn_map *map, unsigned off)
478 return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
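/*
 * Example, assuming 4 KiB pages (RVT_BITS_PER_PAGE == 32768): map
 * index 1 with bit offset 10 encodes QPN 32778.
 */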
482 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
483 * IB_QPT_SMI/IB_QPT_GSI
484 * @rdi: rvt device info structure
485 * @qpt: queue pair number table pointer
487 * @port_num: IB port number, 1 based, comes from core
488 * @exclude_prefix: prefix of special queue pair number being allocated
490 * Return: The queue pair number
492 static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
493 enum ib_qp_type type, u8 port_num, u8 exclude_prefix)
495 u32 i, offset, max_scan, qpn;
496 struct rvt_qpn_map *map;
498 u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
499 RVT_AIP_QPN_MAX : RVT_QPN_MAX;
501 if (rdi->driver_f.alloc_qpn)
502 return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);
504 if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
507 ret = type == IB_QPT_GSI;
508 n = 1 << (ret + 2 * (port_num - 1));
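/*
 * Two bits per port: SMI in the even bit, GSI in the odd bit, e.g.
 * port 1 SMI -> bit 0, port 1 GSI -> bit 1, port 2 SMI -> bit 2.
 */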
509 spin_lock(&qpt->lock);
514 spin_unlock(&qpt->lock);
518 qpn = qpt->last + qpt->incr;
520 qpn = qpt->incr | ((qpt->last & 1) ^ 1);
521 /* offset carries bit 0 */
522 offset = qpn & RVT_BITS_PER_PAGE_MASK;
523 map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
524 max_scan = qpt->nmaps - !offset;
526 if (unlikely(!map->page)) {
527 get_map_page(qpt, map);
528 if (unlikely(!map->page))
532 if (!test_and_set_bit(offset, map->page)) {
539 * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
540 * That is OK. It gets re-assigned below
542 qpn = mk_qpn(qpt, map, offset);
543 } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
545 * In order to keep the number of pages allocated to a
546 * minimum, we scan all the existing pages before increasing
547 * the size of the bitmap table.
549 if (++i > max_scan) {
550 if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
552 map = &qpt->map[qpt->nmaps++];
553 /* start at incr with current bit 0 */
554 offset = qpt->incr | (offset & 1);
555 } else if (map < &qpt->map[qpt->nmaps]) {
557 /* start at incr with current bit 0 */
558 offset = qpt->incr | (offset & 1);
561 /* wrap to first map page, invert bit 0 */
562 offset = qpt->incr | ((offset & 1) ^ 1);
564 /* there can be no set bits in low-order QoS bits */
565 WARN_ON(rdi->dparms.qos_shift > 1 &&
566 offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
567 qpn = mk_qpn(qpt, map, offset);
577 * rvt_clear_mr_refs - Drop held mr refs
578 * @qp: rvt qp data structure
579 * @clr_sends: whether to clear the send side or not
581 static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
584 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
586 if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
587 rvt_put_ss(&qp->s_rdma_read_sge);
589 rvt_put_ss(&qp->r_sge);
592 while (qp->s_last != qp->s_head) {
593 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
595 rvt_put_qp_swqe(qp, wqe);
596 if (++qp->s_last >= qp->s_size)
598 smp_wmb(); /* see qp_set_savail */
601 rvt_put_mr(qp->s_rdma_mr);
602 qp->s_rdma_mr = NULL;
606 for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
607 struct rvt_ack_entry *e = &qp->s_ack_queue[n];
609 if (e->rdma_sge.mr) {
610 rvt_put_mr(e->rdma_sge.mr);
611 e->rdma_sge.mr = NULL;
617 * rvt_swqe_has_lkey - return true if lkey is used by swqe
621 * Tests whether the swqe uses the lkey
623 static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
627 for (i = 0; i < wqe->wr.num_sge; i++) {
628 struct rvt_sge *sge = &wqe->sg_list[i];
630 if (rvt_mr_has_lkey(sge->mr, lkey))
637 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
641 static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
643 u32 s_last = qp->s_last;
645 while (s_last != qp->s_head) {
646 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);
648 if (rvt_swqe_has_lkey(wqe, lkey))
651 if (++s_last >= qp->s_size)
655 if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
661 * rvt_qp_acks_has_lkey - return true if acks have lkey
665 static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
668 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
670 for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
671 struct rvt_ack_entry *e = &qp->s_ack_queue[i];
673 if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
680 * rvt_qp_mr_clean - clean up remote ops for lkey
682 * @lkey: the lkey that is being de-registered
684 * This routine checks if the lkey is being used by
687 * If so, the qp is put into an error state to eliminate
688 * any references from the qp.
690 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
692 bool lastwqe = false;
694 if (qp->ibqp.qp_type == IB_QPT_SMI ||
695 qp->ibqp.qp_type == IB_QPT_GSI)
696 /* avoid special QPs */
698 spin_lock_irq(&qp->r_lock);
699 spin_lock(&qp->s_hlock);
700 spin_lock(&qp->s_lock);
702 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
705 if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
706 rvt_qp_sends_has_lkey(qp, lkey) ||
707 rvt_qp_acks_has_lkey(qp, lkey))
708 lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
710 spin_unlock(&qp->s_lock);
711 spin_unlock(&qp->s_hlock);
712 spin_unlock_irq(&qp->r_lock);
716 ev.device = qp->ibqp.device;
717 ev.element.qp = &qp->ibqp;
718 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
719 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
724 * rvt_remove_qp - remove qp from table
725 * @rdi: rvt dev struct
728 * Remove the QP from the table so it can't be found asynchronously by
729 * the receive routine.
731 static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
733 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
734 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
738 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
740 if (rcu_dereference_protected(rvp->qp[0],
741 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
742 RCU_INIT_POINTER(rvp->qp[0], NULL);
743 } else if (rcu_dereference_protected(rvp->qp[1],
744 lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
745 RCU_INIT_POINTER(rvp->qp[1], NULL);
748 struct rvt_qp __rcu **qpp;
751 qpp = &rdi->qp_dev->qp_table[n];
752 for (; (q = rcu_dereference_protected(*qpp,
753 lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
756 RCU_INIT_POINTER(*qpp,
757 rcu_dereference_protected(qp->next,
758 lockdep_is_held(&rdi->qp_dev->qpt_lock)));
760 trace_rvt_qpremove(qp, n);
766 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
774 * rvt_alloc_rq - allocate memory for user or kernel buffer
775 * @rq: receive queue data structure
776 * @size: number of request queue entries
777 * @node: The NUMA node
778 * @udata: true if user data is available, false otherwise
780 * Return: If memory allocation failed, return -ENOMEM
781 * This function is used by both shared receive
782 * queues and non-shared receive queues to allocate
785 int rvt_alloc_rq(struct rvt_rq *rq, u32 size, int node,
786 struct ib_udata *udata)
789 rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
792 /* need kwq with no buffers */
793 rq->kwq = kzalloc_node(sizeof(*rq->kwq), GFP_KERNEL, node);
796 rq->kwq->curr_wq = rq->wq->wq;
798 /* need kwq with buffers */
800 vzalloc_node(sizeof(struct rvt_krwq) + size, node);
803 rq->kwq->curr_wq = rq->kwq->wq;
806 spin_lock_init(&rq->kwq->p_lock);
807 spin_lock_init(&rq->kwq->c_lock);
815 * rvt_init_qp - initialize the QP state to the reset state
816 * @rdi: rvt dev struct
817 * @qp: the QP to init or reinit
820 * This function is called from both rvt_create_qp() and
821 * rvt_reset_qp(). The difference is that the reset
822 * path takes the necessary locks to protect against concurrent
825 static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
826 enum ib_qp_type type)
830 qp->qp_access_flags = 0;
831 qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
837 qp->s_sending_psn = 0;
838 qp->s_sending_hpsn = 0;
842 if (type == IB_QPT_RC) {
843 qp->s_state = IB_OPCODE_RC_SEND_LAST;
844 qp->r_state = IB_OPCODE_RC_SEND_LAST;
846 qp->s_state = IB_OPCODE_UC_SEND_LAST;
847 qp->r_state = IB_OPCODE_UC_SEND_LAST;
849 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
860 qp->s_mig_state = IB_MIG_MIGRATED;
861 qp->r_head_ack_queue = 0;
862 qp->s_tail_ack_queue = 0;
863 qp->s_acked_ack_queue = 0;
864 qp->s_num_rd_atomic = 0;
865 qp->r_sge.num_sge = 0;
866 atomic_set(&qp->s_reserved_used, 0);
870 * _rvt_reset_qp - initialize the QP state to the reset state
871 * @rdi: rvt dev struct
872 * @qp: the QP to reset
875 * r_lock, s_hlock, and s_lock are required to be held by the caller
877 static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
878 enum ib_qp_type type)
879 __must_hold(&qp->s_lock)
880 __must_hold(&qp->s_hlock)
881 __must_hold(&qp->r_lock)
883 lockdep_assert_held(&qp->r_lock);
884 lockdep_assert_held(&qp->s_hlock);
885 lockdep_assert_held(&qp->s_lock);
886 if (qp->state != IB_QPS_RESET) {
887 qp->state = IB_QPS_RESET;
889 /* Let drivers flush their waitlist */
890 rdi->driver_f.flush_qp_waiters(qp);
891 rvt_stop_rc_timers(qp);
892 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
893 spin_unlock(&qp->s_lock);
894 spin_unlock(&qp->s_hlock);
895 spin_unlock_irq(&qp->r_lock);
897 /* Stop the send queue and the retry timer */
898 rdi->driver_f.stop_send_queue(qp);
899 rvt_del_timers_sync(qp);
900 /* Wait for things to stop */
901 rdi->driver_f.quiesce_qp(qp);
903 /* take qp out of the hash and wait for it to be unused */
904 rvt_remove_qp(rdi, qp);
906 /* grab the lock b/c it was locked at call time */
907 spin_lock_irq(&qp->r_lock);
908 spin_lock(&qp->s_hlock);
909 spin_lock(&qp->s_lock);
911 rvt_clear_mr_refs(qp, 1);
913 * Let the driver do any tear down or re-init it needs to for
914 * a qp that has been reset
916 rdi->driver_f.notify_qp_reset(qp);
918 rvt_init_qp(rdi, qp, type);
919 lockdep_assert_held(&qp->r_lock);
920 lockdep_assert_held(&qp->s_hlock);
921 lockdep_assert_held(&qp->s_lock);
925 * rvt_reset_qp - initialize the QP state to the reset state
926 * @rdi: the device info
927 * @qp: the QP to reset
930 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
931 * before calling _rvt_reset_qp().
933 static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
934 enum ib_qp_type type)
936 spin_lock_irq(&qp->r_lock);
937 spin_lock(&qp->s_hlock);
938 spin_lock(&qp->s_lock);
939 _rvt_reset_qp(rdi, qp, type);
940 spin_unlock(&qp->s_lock);
941 spin_unlock(&qp->s_hlock);
942 spin_unlock_irq(&qp->r_lock);
946 * rvt_free_qpn - Free a qpn from the bit map
948 * @qpn: queue pair number to free
950 static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
952 struct rvt_qpn_map *map;
954 if ((qpn & RVT_AIP_QP_PREFIX_MASK) == RVT_AIP_QP_BASE)
955 qpn &= RVT_AIP_QP_SUFFIX;
957 map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
959 clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
963 * get_allowed_ops - Given a QP type return the appropriate allowed OP
964 * @type: valid, supported, QP type
966 static u8 get_allowed_ops(enum ib_qp_type type)
968 return type == IB_QPT_RC ? IB_OPCODE_RC : type == IB_QPT_UC ?
969 IB_OPCODE_UC : IB_OPCODE_UD;
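/*
 * These are the IBTA opcode space bases (IB_OPCODE_RC is 0x00,
 * IB_OPCODE_UC 0x20, IB_OPCODE_UD 0x60); wire opcodes are formed by
 * OR-ing a per-operation code into the base for the QP type.
 */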
973 * free_ud_wq_attr - Clean up AH attribute cache for UD QPs
974 * @qp: Valid QP with allowed_ops set
976 * The rvt_swqe data structure being used is a union, so this is
977 * only valid for UD QPs.
979 static void free_ud_wq_attr(struct rvt_qp *qp)
981 struct rvt_swqe *wqe;
984 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
985 wqe = rvt_get_swqe_ptr(qp, i);
986 kfree(wqe->ud_wr.attr);
987 wqe->ud_wr.attr = NULL;
992 * alloc_ud_wq_attr - AH attribute cache for UD QPs
993 * @qp: Valid QP with allowed_ops set
994 * @node: Numa node for allocation
996 * The rvt_swqe data structure being used is a union, so this is
997 * only valid for UD QPs.
999 static int alloc_ud_wq_attr(struct rvt_qp *qp, int node)
1001 struct rvt_swqe *wqe;
1004 for (i = 0; qp->allowed_ops == IB_OPCODE_UD && i < qp->s_size; i++) {
1005 wqe = rvt_get_swqe_ptr(qp, i);
1006 wqe->ud_wr.attr = kzalloc_node(sizeof(*wqe->ud_wr.attr),
1008 if (!wqe->ud_wr.attr) {
1009 free_ud_wq_attr(qp);
1018 * rvt_create_qp - create a queue pair for a device
1019 * @ibqp: the queue pair
1020 * @init_attr: the attributes of the queue pair
1021 * @udata: user data for libibverbs.so
1023 * Queue pair creation is mostly an rvt issue. However, drivers have their own
1024 * unique idea of what queue pair numbers mean. For instance there is a reserved
1027 * Return: 0 on success, otherwise returns an errno.
1029 * Called by the ib_create_qp() core verbs function.
1031 int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
1032 struct ib_udata *udata)
1034 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1036 struct rvt_swqe *swq = NULL;
1038 size_t sg_list_sz = 0;
1039 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1042 u8 exclude_prefix = 0;
1047 if (init_attr->create_flags & ~IB_QP_CREATE_NETDEV_USE)
1050 if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
1051 init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr)
1054 /* Check receive queue parameters if no SRQ is specified. */
1055 if (!init_attr->srq) {
1056 if (init_attr->cap.max_recv_sge >
1057 rdi->dparms.props.max_recv_sge ||
1058 init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
1061 if (init_attr->cap.max_send_sge +
1062 init_attr->cap.max_send_wr +
1063 init_attr->cap.max_recv_sge +
1064 init_attr->cap.max_recv_wr == 0)
1068 init_attr->cap.max_send_wr + 1 +
1069 rdi->dparms.reserved_operations;
1070 switch (init_attr->qp_type) {
1073 if (init_attr->port_num == 0 ||
1074 init_attr->port_num > ibqp->device->phys_port_cnt)
1080 sz = struct_size(swq, sg_list, init_attr->cap.max_send_sge);
1081 swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
1085 if (init_attr->srq) {
1086 struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);
1088 if (srq->rq.max_sge > 1)
1089 sg_list_sz = sizeof(*qp->r_sg_list) *
1090 (srq->rq.max_sge - 1);
1091 } else if (init_attr->cap.max_recv_sge > 1)
1092 sg_list_sz = sizeof(*qp->r_sg_list) *
1093 (init_attr->cap.max_recv_sge - 1);
1095 kzalloc_node(sg_list_sz, GFP_KERNEL, rdi->dparms.node);
1098 qp->allowed_ops = get_allowed_ops(init_attr->qp_type);
1100 RCU_INIT_POINTER(qp->next, NULL);
1101 if (init_attr->qp_type == IB_QPT_RC) {
1103 kcalloc_node(rvt_max_atomic(rdi),
1104 sizeof(*qp->s_ack_queue),
1107 if (!qp->s_ack_queue)
1110 /* initialize timers needed for rc qp */
1111 timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
1112 hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
1114 qp->s_rnr_timer.function = rvt_rc_rnr_retry;
1117 * Driver needs to set up its private QP structure and do any
1118 * initialization that is needed.
1120 priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
1122 ret = PTR_ERR(priv);
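/*
 * Per IBTA, Local ACK Timeout is 4.096 us * 2^timeout; the math
 * below works in nanoseconds, so e.g. timeout == 14 gives
 * 4096 * (1 << 14) / 1000 ~= 67108 us, roughly 67 ms per retry.
 */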
1126 qp->timeout_jiffies =
1127 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1129 if (init_attr->srq) {
1132 qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1133 qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1134 sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1135 sizeof(struct rvt_rwqe);
1136 ret = rvt_alloc_rq(&qp->r_rq, qp->r_rq.size * sz,
1137 rdi->dparms.node, udata);
1139 goto bail_driver_priv;
1143 * ib_create_qp() will initialize qp->ibqp
1144 * except for qp->ibqp.qp_num.
1146 spin_lock_init(&qp->r_lock);
1147 spin_lock_init(&qp->s_hlock);
1148 spin_lock_init(&qp->s_lock);
1149 atomic_set(&qp->refcount, 0);
1150 atomic_set(&qp->local_ops_pending, 0);
1151 init_waitqueue_head(&qp->wait);
1152 INIT_LIST_HEAD(&qp->rspwait);
1153 qp->state = IB_QPS_RESET;
1155 qp->s_size = sqsize;
1156 qp->s_avail = init_attr->cap.max_send_wr;
1157 qp->s_max_sge = init_attr->cap.max_send_sge;
1158 if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1159 qp->s_flags = RVT_S_SIGNAL_REQ_WR;
1160 ret = alloc_ud_wq_attr(qp, rdi->dparms.node);
1164 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
1165 exclude_prefix = RVT_AIP_QP_PREFIX;
1167 ret = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
1169 init_attr->port_num,
1174 qp->ibqp.qp_num = ret;
1175 if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
1176 qp->ibqp.qp_num |= RVT_AIP_QP_BASE;
1177 qp->port_num = init_attr->port_num;
1178 rvt_init_qp(rdi, qp, init_attr->qp_type);
1179 if (rdi->driver_f.qp_priv_init) {
1180 ret = rdi->driver_f.qp_priv_init(rdi, qp, init_attr);
1187 /* Don't support raw QPs */
1191 init_attr->cap.max_inline_data = 0;
1194 * Return the address of the RWQ as the offset to mmap.
1195 * See rvt_mmap() for details.
1197 if (udata && udata->outlen >= sizeof(__u64)) {
1201 ret = ib_copy_to_udata(udata, &offset,
1206 u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;
1208 qp->ip = rvt_create_mmap_info(rdi, s, udata,
1210 if (IS_ERR(qp->ip)) {
1211 ret = PTR_ERR(qp->ip);
1215 ret = ib_copy_to_udata(udata, &qp->ip->offset,
1216 sizeof(qp->ip->offset));
1220 qp->pid = current->pid;
1223 spin_lock(&rdi->n_qps_lock);
1224 if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
1225 spin_unlock(&rdi->n_qps_lock);
1230 rdi->n_qps_allocated++;
1232 * Maintain a busy_jiffies variable that will be added to the timeout
1233 * period in mod_retry_timer and add_retry_timer. This busy jiffies
1234 * is scaled by the number of rc qps created for the device to reduce
1235 * the number of timeouts occurring when there is a large number of
1236 * qps. busy_jiffies is incremented every rc qp scaling interval.
1237 * The scaling interval is selected based on extensive performance
1238 * evaluation of targeted workloads.
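 *
 * Illustrative numbers only: if RC_QP_SCALING_INTERVAL were 50, a
 * device with 150 RC QPs would add busy_jiffies = 3 jiffies of
 * slack to every timeout set by rvt_add_retry_timer_ext().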
1240 if (init_attr->qp_type == IB_QPT_RC) {
1242 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1244 spin_unlock(&rdi->n_qps_lock);
1247 spin_lock_irq(&rdi->pending_lock);
1248 list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
1249 spin_unlock_irq(&rdi->pending_lock);
1256 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1259 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1262 free_ud_wq_attr(qp);
1265 rvt_free_rq(&qp->r_rq);
1268 rdi->driver_f.qp_priv_free(rdi, qp);
1271 kfree(qp->s_ack_queue);
1272 kfree(qp->r_sg_list);
1278 * rvt_error_qp - put a QP into the error state
1279 * @qp: the QP to put into the error state
1280 * @err: the receive completion error to signal if a RWQE is active
1282 * Flushes both send and receive work queues.
1284 * Return: true if last WQE event should be generated.
1285 * The QP r_lock and s_lock should be held and interrupts disabled.
1286 * If we are already in error state, just return.
1288 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
1292 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1294 lockdep_assert_held(&qp->r_lock);
1295 lockdep_assert_held(&qp->s_lock);
1296 if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
1299 qp->state = IB_QPS_ERR;
1301 if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
1302 qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
1303 del_timer(&qp->s_timer);
1306 if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
1307 qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;
1309 rdi->driver_f.notify_error_qp(qp);
1311 /* Schedule the sending tasklet to drain the send work queue. */
1312 if (READ_ONCE(qp->s_last) != qp->s_head)
1313 rdi->driver_f.schedule_send(qp);
1315 rvt_clear_mr_refs(qp, 0);
1317 memset(&wc, 0, sizeof(wc));
1319 wc.opcode = IB_WC_RECV;
1321 if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
1322 wc.wr_id = qp->r_wr_id;
1324 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1326 wc.status = IB_WC_WR_FLUSH_ERR;
1331 struct rvt_rwq *wq = NULL;
1332 struct rvt_krwq *kwq = NULL;
1334 spin_lock(&qp->r_rq.kwq->c_lock);
1335 /* qp->ip used to validate if there is a user buffer mmaped */
1338 head = RDMA_READ_UAPI_ATOMIC(wq->head);
1339 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
1345 /* sanity check pointers before trusting them */
1346 if (head >= qp->r_rq.size)
1348 if (tail >= qp->r_rq.size)
1350 while (tail != head) {
1351 wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
1352 if (++tail >= qp->r_rq.size)
1354 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1357 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
1360 spin_unlock(&qp->r_rq.kwq->c_lock);
1361 } else if (qp->ibqp.event_handler) {
1368 EXPORT_SYMBOL(rvt_error_qp);
1371 * Put the QP into the hash table.
1372 * The hash table holds a reference to the QP.
1374 static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
1376 struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
1377 unsigned long flags;
1380 spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);
1382 if (qp->ibqp.qp_num <= 1) {
1383 rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
1385 u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
1387 qp->next = rdi->qp_dev->qp_table[n];
1388 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
1389 trace_rvt_qpinsert(qp, n);
1392 spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
1396 * rvt_modify_qp - modify the attributes of a queue pair
1397 * @ibqp: the queue pair who's attributes we're modifying
1398 * @attr: the new attributes
1399 * @attr_mask: the mask of attributes to modify
1400 * @udata: user data for libibverbs.so
1402 * Return: 0 on success, otherwise returns an errno.
1404 int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1405 int attr_mask, struct ib_udata *udata)
1407 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1408 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1409 enum ib_qp_state cur_state, new_state;
1413 int pmtu = 0; /* for gcc warning only */
1416 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1419 spin_lock_irq(&qp->r_lock);
1420 spin_lock(&qp->s_hlock);
1421 spin_lock(&qp->s_lock);
1423 cur_state = attr_mask & IB_QP_CUR_STATE ?
1424 attr->cur_qp_state : qp->state;
1425 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1426 opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
1428 if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
1432 if (rdi->driver_f.check_modify_qp &&
1433 rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
1436 if (attr_mask & IB_QP_AV) {
1438 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1439 opa_get_mcast_base(OPA_MCAST_NR))
1442 if (rdma_ah_get_dlid(&attr->ah_attr) >=
1443 be16_to_cpu(IB_MULTICAST_LID_BASE))
1447 if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
1451 if (attr_mask & IB_QP_ALT_PATH) {
1453 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1454 opa_get_mcast_base(OPA_MCAST_NR))
1457 if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
1458 be16_to_cpu(IB_MULTICAST_LID_BASE))
1462 if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
1464 if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
1468 if (attr_mask & IB_QP_PKEY_INDEX)
1469 if (attr->pkey_index >= rvt_get_npkeys(rdi))
1472 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1473 if (attr->min_rnr_timer > 31)
1476 if (attr_mask & IB_QP_PORT)
1477 if (qp->ibqp.qp_type == IB_QPT_SMI ||
1478 qp->ibqp.qp_type == IB_QPT_GSI ||
1479 attr->port_num == 0 ||
1480 attr->port_num > ibqp->device->phys_port_cnt)
1483 if (attr_mask & IB_QP_DEST_QPN)
1484 if (attr->dest_qp_num > RVT_QPN_MASK)
1487 if (attr_mask & IB_QP_RETRY_CNT)
1488 if (attr->retry_cnt > 7)
1491 if (attr_mask & IB_QP_RNR_RETRY)
1492 if (attr->rnr_retry > 7)
1496 * Don't allow invalid path_mtu values. OK to set greater
1497 * than the active mtu (or even the max_cap, if we have tuned
1498 * that to a small mtu). We'll set qp->path_mtu
1499 * to the lesser of the requested attribute mtu and the active mtu,
1500 * for packetizing messages.
1501 * Note that the QP port has to be set in INIT and MTU in RTR.
1503 if (attr_mask & IB_QP_PATH_MTU) {
1504 pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
1509 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1510 if (attr->path_mig_state == IB_MIG_REARM) {
1511 if (qp->s_mig_state == IB_MIG_ARMED)
1513 if (new_state != IB_QPS_RTS)
1515 } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
1516 if (qp->s_mig_state == IB_MIG_REARM)
1518 if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
1520 if (qp->s_mig_state == IB_MIG_ARMED)
1527 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1528 if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
1531 switch (new_state) {
1533 if (qp->state != IB_QPS_RESET)
1534 _rvt_reset_qp(rdi, qp, ibqp->qp_type);
1538 /* Allow event to re-trigger if QP set to RTR more than once */
1539 qp->r_flags &= ~RVT_R_COMM_EST;
1540 qp->state = new_state;
1544 qp->s_draining = qp->s_last != qp->s_cur;
1545 qp->state = new_state;
1549 if (qp->ibqp.qp_type == IB_QPT_RC)
1551 qp->state = new_state;
1555 lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1559 qp->state = new_state;
1563 if (attr_mask & IB_QP_PKEY_INDEX)
1564 qp->s_pkey_index = attr->pkey_index;
1566 if (attr_mask & IB_QP_PORT)
1567 qp->port_num = attr->port_num;
1569 if (attr_mask & IB_QP_DEST_QPN)
1570 qp->remote_qpn = attr->dest_qp_num;
1572 if (attr_mask & IB_QP_SQ_PSN) {
1573 qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
1574 qp->s_psn = qp->s_next_psn;
1575 qp->s_sending_psn = qp->s_next_psn;
1576 qp->s_last_psn = qp->s_next_psn - 1;
1577 qp->s_sending_hpsn = qp->s_last_psn;
1580 if (attr_mask & IB_QP_RQ_PSN)
1581 qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;
1583 if (attr_mask & IB_QP_ACCESS_FLAGS)
1584 qp->qp_access_flags = attr->qp_access_flags;
1586 if (attr_mask & IB_QP_AV) {
1587 rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
1588 qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
1589 qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
1592 if (attr_mask & IB_QP_ALT_PATH) {
1593 rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
1594 qp->s_alt_pkey_index = attr->alt_pkey_index;
1597 if (attr_mask & IB_QP_PATH_MIG_STATE) {
1598 qp->s_mig_state = attr->path_mig_state;
1600 qp->remote_ah_attr = qp->alt_ah_attr;
1601 qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
1602 qp->s_pkey_index = qp->s_alt_pkey_index;
1606 if (attr_mask & IB_QP_PATH_MTU) {
1607 qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
1608 qp->log_pmtu = ilog2(qp->pmtu);
1611 if (attr_mask & IB_QP_RETRY_CNT) {
1612 qp->s_retry_cnt = attr->retry_cnt;
1613 qp->s_retry = attr->retry_cnt;
1616 if (attr_mask & IB_QP_RNR_RETRY) {
1617 qp->s_rnr_retry_cnt = attr->rnr_retry;
1618 qp->s_rnr_retry = attr->rnr_retry;
1621 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1622 qp->r_min_rnr_timer = attr->min_rnr_timer;
1624 if (attr_mask & IB_QP_TIMEOUT) {
1625 qp->timeout = attr->timeout;
1626 qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
1629 if (attr_mask & IB_QP_QKEY)
1630 qp->qkey = attr->qkey;
1632 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1633 qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
1635 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
1636 qp->s_max_rd_atomic = attr->max_rd_atomic;
1638 if (rdi->driver_f.modify_qp)
1639 rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
1641 spin_unlock(&qp->s_lock);
1642 spin_unlock(&qp->s_hlock);
1643 spin_unlock_irq(&qp->r_lock);
1645 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1646 rvt_insert_qp(rdi, qp);
1649 ev.device = qp->ibqp.device;
1650 ev.element.qp = &qp->ibqp;
1651 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1652 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1655 ev.device = qp->ibqp.device;
1656 ev.element.qp = &qp->ibqp;
1657 ev.event = IB_EVENT_PATH_MIG;
1658 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1663 spin_unlock(&qp->s_lock);
1664 spin_unlock(&qp->s_hlock);
1665 spin_unlock_irq(&qp->r_lock);
1670 * rvt_destroy_qp - destroy a queue pair
1671 * @ibqp: the queue pair to destroy
1672 * @udata: unused by the driver
1674 * Note that this can be called while the QP is actively sending or
1677 * Return: 0 on success.
1679 int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1681 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1682 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1684 rvt_reset_qp(rdi, qp, ibqp->qp_type);
1686 wait_event(qp->wait, !atomic_read(&qp->refcount));
1687 /* qpn is now available for use again */
1688 rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
1690 spin_lock(&rdi->n_qps_lock);
1691 rdi->n_qps_allocated--;
1692 if (qp->ibqp.qp_type == IB_QPT_RC) {
1694 rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
1696 spin_unlock(&rdi->n_qps_lock);
1699 kref_put(&qp->ip->ref, rvt_release_mmap_info);
1700 kvfree(qp->r_rq.kwq);
1701 rdi->driver_f.qp_priv_free(rdi, qp);
1702 kfree(qp->s_ack_queue);
1703 kfree(qp->r_sg_list);
1704 rdma_destroy_ah_attr(&qp->remote_ah_attr);
1705 rdma_destroy_ah_attr(&qp->alt_ah_attr);
1706 free_ud_wq_attr(qp);
1712 * rvt_query_qp - query an ibqp
1713 * @ibqp: IB qp to query
1714 * @attr: attr struct to fill in
1715 * @attr_mask: attr mask ignored
1716 * @init_attr: struct to fill in
1720 int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1721 int attr_mask, struct ib_qp_init_attr *init_attr)
1723 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1724 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
1726 attr->qp_state = qp->state;
1727 attr->cur_qp_state = attr->qp_state;
1728 attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
1729 attr->path_mig_state = qp->s_mig_state;
1730 attr->qkey = qp->qkey;
1731 attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
1732 attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
1733 attr->dest_qp_num = qp->remote_qpn;
1734 attr->qp_access_flags = qp->qp_access_flags;
1735 attr->cap.max_send_wr = qp->s_size - 1 -
1736 rdi->dparms.reserved_operations;
1737 attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
1738 attr->cap.max_send_sge = qp->s_max_sge;
1739 attr->cap.max_recv_sge = qp->r_rq.max_sge;
1740 attr->cap.max_inline_data = 0;
1741 attr->ah_attr = qp->remote_ah_attr;
1742 attr->alt_ah_attr = qp->alt_ah_attr;
1743 attr->pkey_index = qp->s_pkey_index;
1744 attr->alt_pkey_index = qp->s_alt_pkey_index;
1745 attr->en_sqd_async_notify = 0;
1746 attr->sq_draining = qp->s_draining;
1747 attr->max_rd_atomic = qp->s_max_rd_atomic;
1748 attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
1749 attr->min_rnr_timer = qp->r_min_rnr_timer;
1750 attr->port_num = qp->port_num;
1751 attr->timeout = qp->timeout;
1752 attr->retry_cnt = qp->s_retry_cnt;
1753 attr->rnr_retry = qp->s_rnr_retry_cnt;
1754 attr->alt_port_num =
1755 rdma_ah_get_port_num(&qp->alt_ah_attr);
1756 attr->alt_timeout = qp->alt_timeout;
1758 init_attr->event_handler = qp->ibqp.event_handler;
1759 init_attr->qp_context = qp->ibqp.qp_context;
1760 init_attr->send_cq = qp->ibqp.send_cq;
1761 init_attr->recv_cq = qp->ibqp.recv_cq;
1762 init_attr->srq = qp->ibqp.srq;
1763 init_attr->cap = attr->cap;
1764 if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
1765 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
1767 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
1768 init_attr->qp_type = qp->ibqp.qp_type;
1769 init_attr->port_num = qp->port_num;
1774 * rvt_post_recv - post a receive on a QP
1775 * @ibqp: the QP to post the receive on
1776 * @wr: the WR to post
1777 * @bad_wr: the first bad WR is put here
1779 * This may be called from interrupt context.
1781 * Return: 0 on success otherwise errno
1783 int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
1784 const struct ib_recv_wr **bad_wr)
1786 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
1787 struct rvt_krwq *wq = qp->r_rq.kwq;
1788 unsigned long flags;
1789 int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
1792 /* Check that state is OK to post receive. */
1793 if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
1798 for (; wr; wr = wr->next) {
1799 struct rvt_rwqe *wqe;
1803 if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
1808 spin_lock_irqsave(&qp->r_rq.kwq->p_lock, flags);
1809 next = wq->head + 1;
1810 if (next >= qp->r_rq.size)
1812 if (next == READ_ONCE(wq->tail)) {
1813 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1817 if (unlikely(qp_err_flush)) {
1820 memset(&wc, 0, sizeof(wc));
1822 wc.opcode = IB_WC_RECV;
1823 wc.wr_id = wr->wr_id;
1824 wc.status = IB_WC_WR_FLUSH_ERR;
1825 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
1827 wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
1828 wqe->wr_id = wr->wr_id;
1829 wqe->num_sge = wr->num_sge;
1830 for (i = 0; i < wr->num_sge; i++) {
1831 wqe->sg_list[i].addr = wr->sg_list[i].addr;
1832 wqe->sg_list[i].length = wr->sg_list[i].length;
1833 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
1836 * Make sure queue entry is written
1837 * before the head index.
1839 smp_store_release(&wq->head, next);
1841 spin_unlock_irqrestore(&qp->r_rq.kwq->p_lock, flags);
1847 * rvt_qp_valid_operation - validate post send wr request
1849 * @post_parms: the post send table for the driver
1850 * @wr: the work request
1852 * The routine validates the operation based on the
1853 * validation table and returns the length of the operation,
1854 * which can extend beyond the ib_send_wr. Operation
1855 * dependent flags key atomic operation validation.
1857 * There is an exception for UD qps that validates the pd and
1858 * overrides the length to include the additional UD specific
1861 * Returns a negative error or the length of the work request
1862 * for building the swqe.
1864 static inline int rvt_qp_valid_operation(
1866 const struct rvt_operation_params *post_parms,
1867 const struct ib_send_wr *wr)
1871 if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
1873 if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
1875 if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
1876 ibpd_to_rvtpd(qp->ibqp.pd)->user)
1878 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
1879 (wr->num_sge == 0 ||
1880 wr->sg_list[0].length < sizeof(u64) ||
1881 wr->sg_list[0].addr & (sizeof(u64) - 1)))
1883 if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
1884 !qp->s_max_rd_atomic)
1886 len = post_parms[wr->opcode].length;
1888 if (qp->ibqp.qp_type != IB_QPT_UC &&
1889 qp->ibqp.qp_type != IB_QPT_RC) {
1890 if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
1892 len = sizeof(struct ib_ud_wr);
1898 * rvt_qp_is_avail - determine queue capacity
1900 * @rdi: the rdmavt device
1901 * @reserved_op: is reserved operation
1903 * This assumes the s_hlock is held but the s_last
1904 * qp variable is uncontrolled.
1906 * For non reserved operations, the qp->s_avail
1909 * The return value is zero or -ENOMEM.
1911 static inline int rvt_qp_is_avail(
1913 struct rvt_dev_info *rdi,
1920 /* see rvt_qp_wqe_unreserve() */
1921 smp_mb__before_atomic();
1922 if (unlikely(reserved_op)) {
1923 /* see rvt_qp_wqe_unreserve() */
1924 reserved_used = atomic_read(&qp->s_reserved_used);
1925 if (reserved_used >= rdi->dparms.reserved_operations)
1929 /* non-reserved operations */
1930 if (likely(qp->s_avail))
1932 /* See rvt_qp_complete_swqe() */
1933 slast = smp_load_acquire(&qp->s_last);
1934 if (qp->s_head >= slast)
1935 avail = qp->s_size - (qp->s_head - slast);
1937 avail = slast - qp->s_head;
1939 reserved_used = atomic_read(&qp->s_reserved_used);
1941 (rdi->dparms.reserved_operations - reserved_used);
1942 /* ensure we don't assign a negative s_avail */
1943 if ((s32)avail <= 0)
1945 qp->s_avail = avail;
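/*
 * Ring example: s_size = 16, s_head = 10, slast = 4 leaves
 * 16 - (10 - 4) = 10 free slots, less any reserved operations
 * still outstanding.
 */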
1946 if (WARN_ON(qp->s_avail >
1947 (qp->s_size - 1 - rdi->dparms.reserved_operations)))
1949 "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
1950 qp->ibqp.qp_num, qp->s_size, qp->s_avail,
1951 qp->s_head, qp->s_tail, qp->s_cur,
1952 qp->s_acked, qp->s_last);
1957 * rvt_post_one_wr - post one RC, UC, or UD send work request
1958 * @qp: the QP to post on
1959 * @wr: the work request to send
1960 * @call_send: kick the send engine into gear
1962 static int rvt_post_one_wr(struct rvt_qp *qp,
1963 const struct ib_send_wr *wr,
1966 struct rvt_swqe *wqe;
1971 struct rvt_lkey_table *rkt;
1973 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
1978 int local_ops_delayed = 0;
1980 BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));
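/*
 * The BUILD_BUG_ON above keeps every QP type addressable as one bit
 * of the u32 qpt_support mask, e.g. a post_parms entry valid for RC
 * and UC sets BIT(IB_QPT_RC) | BIT(IB_QPT_UC), which
 * rvt_qp_valid_operation() tests against BIT(qp->ibqp.qp_type).
 */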
1982 /* IB spec says that num_sge == 0 is OK. */
1983 if (unlikely(wr->num_sge > qp->s_max_sge))
1986 ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
1992 * Local operations include fast register and local invalidate.
1993 * Fast register needs to be processed immediately because the
1994 * registered lkey may be used by following work requests and the
1995 * lkey needs to be valid at the time those requests are posted.
1996 * Local invalidate can be processed immediately if fencing is
1997 * not required and no previous local invalidate ops are pending.
1998 * Signaled local operations that have been processed immediately
1999 * need to have requests with "completion only" flags set posted
2000 * to the send queue in order to generate completions.
2002 if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
2003 switch (wr->opcode) {
2005 ret = rvt_fast_reg_mr(qp,
2008 reg_wr(wr)->access);
2009 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2012 case IB_WR_LOCAL_INV:
2013 if ((wr->send_flags & IB_SEND_FENCE) ||
2014 atomic_read(&qp->local_ops_pending)) {
2015 local_ops_delayed = 1;
2017 ret = rvt_invalidate_rkey(
2018 qp, wr->ex.invalidate_rkey);
2019 if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
2028 reserved_op = rdi->post_parms[wr->opcode].flags &
2029 RVT_OPERATION_USE_RESERVE;
2030 /* check for avail */
2031 ret = rvt_qp_is_avail(qp, rdi, reserved_op);
2034 next = qp->s_head + 1;
2035 if (next >= qp->s_size)
2038 rkt = &rdi->lkey_table;
2039 pd = ibpd_to_rvtpd(qp->ibqp.pd);
2040 wqe = rvt_get_swqe_ptr(qp, qp->s_head);
2042 /* cplen has length from above */
2043 memcpy(&wqe->wr, wr, cplen);
2048 struct rvt_sge *last_sge = NULL;
2050 acc = wr->opcode >= IB_WR_RDMA_READ ?
2051 IB_ACCESS_LOCAL_WRITE : 0;
2052 for (i = 0; i < wr->num_sge; i++) {
2053 u32 length = wr->sg_list[i].length;
2057 ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
2058 &wr->sg_list[i], acc);
2059 if (unlikely(ret < 0))
2060 goto bail_inval_free;
2061 wqe->length += length;
2063 last_sge = &wqe->sg_list[j];
2066 wqe->wr.num_sge = j;
2070 * Calculate and set SWQE PSN values prior to handing it off
2071 * to the driver's check routine. This gives the driver the
2072 * opportunity to adjust PSN values based on internal checks.
2074 log_pmtu = qp->log_pmtu;
2075 if (qp->allowed_ops == IB_OPCODE_UD) {
2076 struct rvt_ah *ah = rvt_get_swqe_ah(wqe);
2078 log_pmtu = ah->log_pmtu;
2079 rdma_copy_ah_attr(wqe->ud_wr.attr, &ah->attr);
2082 if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
2083 if (local_ops_delayed)
2084 atomic_inc(&qp->local_ops_pending);
2086 wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
2091 wqe->ssn = qp->s_ssn++;
2092 wqe->psn = qp->s_next_psn;
2093 wqe->lpsn = wqe->psn +
2095 ((wqe->length - 1) >> log_pmtu) :
2099 /* general part of wqe valid - allow for driver checks */
2100 if (rdi->driver_f.setup_wqe) {
2101 ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
2103 goto bail_inval_free_ref;
2106 if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
2107 qp->s_next_psn = wqe->lpsn + 1;
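/*
 * PSN accounting example: one PSN per packet, so a 10000-byte send
 * over a 4096-byte path MTU (log_pmtu == 12) gets
 * lpsn = psn + ((10000 - 1) >> 12) = psn + 2 (three packets), and
 * the next WQE starts at lpsn + 1.
 */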
2109 if (unlikely(reserved_op)) {
2110 wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
2111 rvt_qp_wqe_reserve(qp, wqe);
2113 wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
2116 trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
2117 smp_wmb(); /* see request builders */
2122 bail_inval_free_ref:
2123 if (qp->allowed_ops == IB_OPCODE_UD)
2124 rdma_destroy_ah_attr(wqe->ud_wr.attr);
2126 /* release mr holds */
2128 struct rvt_sge *sge = &wqe->sg_list[--j];
2130 rvt_put_mr(sge->mr);
2136 * rvt_post_send - post a send on a QP
2137 * @ibqp: the QP to post the send on
2138 * @wr: the list of work requests to post
2139 * @bad_wr: the first bad WR is put here
2141 * This may be called from interrupt context.
2143 * Return: 0 on success else errno
2145 int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2146 const struct ib_send_wr **bad_wr)
2148 struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
2149 struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
2150 unsigned long flags = 0;
2155 spin_lock_irqsave(&qp->s_hlock, flags);
2158 * Ensure QP state is such that we can send. If not bail out early,
2159 * there is no need to do this every time we post a send.
2161 if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
2162 spin_unlock_irqrestore(&qp->s_hlock, flags);
2167 * If the send queue is empty and we only have a single WR, then just go
2168 * ahead and kick the send engine into gear. Otherwise we will always
2169 * just schedule the send to happen later.
2171 call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;
2173 for (; wr; wr = wr->next) {
2174 err = rvt_post_one_wr(qp, wr, &call_send);
2175 if (unlikely(err)) {
2182 spin_unlock_irqrestore(&qp->s_hlock, flags);
2185 * Only call do_send if there is exactly one packet, and the
2186 * driver said it was ok.
2188 if (nreq == 1 && call_send)
2189 rdi->driver_f.do_send(qp);
2191 rdi->driver_f.schedule_send_no_lock(qp);
2197 * rvt_post_srq_recv - post a receive on a shared receive queue
2198 * @ibsrq: the SRQ to post the receive on
2199 * @wr: the list of work requests to post
2200 * @bad_wr: A pointer to the first WR to cause a problem is put here
2202 * This may be called from interrupt context.
2204 * Return: 0 on success else errno
2206 int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2207 const struct ib_recv_wr **bad_wr)
2209 struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
2210 struct rvt_krwq *wq;
2211 unsigned long flags;
2213 for (; wr; wr = wr->next) {
2214 struct rvt_rwqe *wqe;
2218 if ((unsigned)wr->num_sge > srq->rq.max_sge) {
2223 spin_lock_irqsave(&srq->rq.kwq->p_lock, flags);
2225 next = wq->head + 1;
2226 if (next >= srq->rq.size)
2228 if (next == READ_ONCE(wq->tail)) {
2229 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2234 wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
2235 wqe->wr_id = wr->wr_id;
2236 wqe->num_sge = wr->num_sge;
2237 for (i = 0; i < wr->num_sge; i++) {
2238 wqe->sg_list[i].addr = wr->sg_list[i].addr;
2239 wqe->sg_list[i].length = wr->sg_list[i].length;
2240 wqe->sg_list[i].lkey = wr->sg_list[i].lkey;
2242 /* Make sure queue entry is written before the head index. */
2243 smp_store_release(&wq->head, next);
2244 spin_unlock_irqrestore(&srq->rq.kwq->p_lock, flags);
2250 * rvt uses the internal kernel struct as part of its ABI; for now make sure
2251 * the kernel struct does not change layout. FIXME: rvt should never cast the
2252 * user struct to a kernel struct.
2254 static struct ib_sge *rvt_cast_sge(struct rvt_wqe_sge *sge)
2256 BUILD_BUG_ON(offsetof(struct ib_sge, addr) !=
2257 offsetof(struct rvt_wqe_sge, addr));
2258 BUILD_BUG_ON(offsetof(struct ib_sge, length) !=
2259 offsetof(struct rvt_wqe_sge, length));
2260 BUILD_BUG_ON(offsetof(struct ib_sge, lkey) !=
2261 offsetof(struct rvt_wqe_sge, lkey));
2262 return (struct ib_sge *)sge;
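/*
 * The BUILD_BUG_ONs above make this cast safe only while ib_sge and
 * rvt_wqe_sge keep the same { addr, length, lkey } field layout; any
 * divergence breaks the build instead of silently corrupting SGEs.
 */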
2266 * Validate a RWQE and fill in the SGE state.
2269 static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
2273 struct rvt_lkey_table *rkt;
2275 struct rvt_sge_state *ss;
2276 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
2278 rkt = &rdi->lkey_table;
2279 pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
2281 ss->sg_list = qp->r_sg_list;
2283 for (i = j = 0; i < wqe->num_sge; i++) {
2284 if (wqe->sg_list[i].length == 0)
2287 ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
2288 NULL, rvt_cast_sge(&wqe->sg_list[i]),
2289 IB_ACCESS_LOCAL_WRITE);
2290 if (unlikely(ret <= 0))
2292 qp->r_len += wqe->sg_list[i].length;
2296 ss->total_len = qp->r_len;
2301 struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
2303 rvt_put_mr(sge->mr);
2306 memset(&wc, 0, sizeof(wc));
2307 wc.wr_id = wqe->wr_id;
2308 wc.status = IB_WC_LOC_PROT_ERR;
2309 wc.opcode = IB_WC_RECV;
2311 /* Signal solicited completion event. */
2312 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
2317 * get_rvt_head - get head indices of the circular buffer
2318 * @rq: data structure for request queue entry
2321 * Return - head index value
2323 static inline u32 get_rvt_head(struct rvt_rq *rq, void *ip)
2328 head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
2330 head = rq->kwq->head;
2336 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
2338 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
2340 * Return -1 if there is a local error, 0 if no RWQE is available,
2341 * otherwise return 1.
2343 * Can be called from interrupt level.
2345 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
2347 unsigned long flags;
2349 struct rvt_krwq *kwq = NULL;
2351 struct rvt_srq *srq;
2352 struct rvt_rwqe *wqe;
2353 void (*handler)(struct ib_event *, void *);
2360 srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
2361 handler = srq->ibsrq.event_handler;
2371 spin_lock_irqsave(&rq->kwq->c_lock, flags);
2372 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
2379 tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
2384 /* Validate tail before using it since it is user writable. */
2385 if (tail >= rq->size)
2388 if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
2389 head = get_rvt_head(rq, ip);
2390 kwq->count = rvt_get_rq_count(rq, head, tail);
2392 if (unlikely(kwq->count == 0)) {
2396 /* Make sure entry is read after the count is read. */
2398 wqe = rvt_get_rwqe_ptr(rq, tail);
2400 * Even though we update the tail index in memory, the verbs
2401 * consumer is not supposed to post more entries until a
2402 * completion is generated.
2404 if (++tail >= rq->size)
2407 RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
2410 if (!wr_id_only && !init_sge(qp, wqe)) {
2414 qp->r_wr_id = wqe->wr_id;
2418 set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
2421 * Validate head pointer value and compute
2422 * the number of remaining WQEs.
2424 if (kwq->count < srq->limit) {
2426 rvt_get_rq_count(rq,
2427 get_rvt_head(rq, ip), tail);
2428 if (kwq->count < srq->limit) {
2432 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2433 ev.device = qp->ibqp.device;
2434 ev.element.srq = qp->ibqp.srq;
2435 ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
2436 handler(&ev, srq->ibsrq.srq_context);
2442 spin_unlock_irqrestore(&rq->kwq->c_lock, flags);
2446 EXPORT_SYMBOL(rvt_get_rwqe);
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);
/*
 * rvt_rnr_tbl_to_usec - return the RNR timeout in microseconds
 * @index - the index into ib_rvt_rnr_table
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);
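/*
 * Example: only the low five bits (IB_AETH_CREDIT_MASK) select the RNR
 * timeout code, so rvt_rnr_tbl_to_usec(0x20 | code) and
 * rvt_rnr_tbl_to_usec(code) return the same value.
 */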
static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}
/*
 * rvt_add_retry_timer_ext - add/start a retry timer
 * @qp - the QP
 * @shift - timeout shift to wait for multiple packets
 *
 * add a retry timer on the QP
 */
void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + rdi->busy_jiffies +
			      (qp->timeout_jiffies << shift);
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer_ext);
/**
 * rvt_add_rnr_timer - add/start an rnr timer on the QP
 * @qp: the QP
 * @aeth: aeth of RNR timeout, simulated aeth for loopback
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	trace_rvt_rnrnak_add(qp, to);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
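/*
 * Example: on the loopback path there is no wire AETH, so the caller
 * fabricates one by shifting the responder's 5-bit RNR timeout code
 * into the credit field, as rvt_ruc_loopback() does below:
 *
 *	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer << IB_AETH_CREDIT_SHIFT);
 */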
/**
 * rvt_stop_rc_timers - stop all timers
 * @qp: the QP
 *
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp: the QP
 *
 * stop an rnr timer if one is running
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		trace_rvt_rnrnak_stop(qp, 0);
	}
}
/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);
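/*
 * Note: unlike rvt_stop_rc_timers(), this must run without qp->s_lock
 * held: del_timer_sync() waits for a running rvt_rc_timeout() to
 * finish, and that handler itself acquires s_lock.
 */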
/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
	struct rvt_qp *qp = from_timer(qp, t, s_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_rnr_timer for RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	trace_rvt_rnrnak_timeout(qp, 0);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
 * rvt_qp_iter_init - initialize an iterator for QP iteration
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * @cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);
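/*
 * Illustrative sketch (not part of rdmavt): pairing rvt_qp_iter_init()
 * with rvt_qp_iter_next() under RCU, as a debugfs seq_file provider
 * might. The function name example_dump_qps is hypothetical.
 */
static void example_dump_qps(struct rvt_dev_info *rdi)
{
	struct rvt_qp_iter *iter = rvt_qp_iter_init(rdi, 0, NULL);

	if (!iter)
		return;
	rcu_read_lock();
	while (!rvt_qp_iter_next(iter))
		pr_info("qpn 0x%x\n", iter->qp->ibqp.qp_num);
	rcu_read_unlock();
	kfree(iter);
}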
/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter: the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table. Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);
/**
 * rvt_qp_iter - iterate all QPs
 * @rdi: rvt devinfo
 * @v: a 64-bit value
 * @cb: a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user-defined callback and @v is a 64-bit
 * value passed to and relevant for processing in the
 * cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
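/*
 * Illustrative sketch (not part of rdmavt): a caller-side callback for
 * rvt_qp_iter() that counts QPs. The name example_count_qp and the
 * pointer-in-u64 encoding are hypothetical. Note the callback runs
 * outside the RCU read-side critical section with a QP reference held.
 */
static void example_count_qp(struct rvt_qp *qp, u64 v)
{
	atomic_t *count = (atomic_t *)(uintptr_t)v;

	atomic_inc(count);
}

/* Usage: rvt_qp_iter(rdi, (u64)(uintptr_t)&qp_count, example_count_qp); */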
/*
 * This should be called with s_lock and r_lock held.
 */
void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;
	struct rvt_dev_info *rdi;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;
	rdi = ib_to_rvt(qp->ibqp.device);

	old_last = qp->s_last;
	trace_rvt_qp_send_completion(qp, wqe, old_last);
	last = rvt_qp_complete_swqe(qp, wqe, rdi->wc_opcode[wqe->wr.opcode],
				    status);
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}
EXPORT_SYMBOL(rvt_send_complete);
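/*
 * Example caller (see rvt_ruc_loopback() below): with sqp->s_lock
 * already held, the r_lock is nested around the completion:
 *
 *	spin_lock(&sqp->r_lock);
 *	rvt_send_complete(sqp, wqe, send_status);
 *	spin_unlock(&sqp->r_lock);
 */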
/**
 * rvt_copy_sge - copy data to SGE memory
 * @qp: associated QP
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 * @release: boolean to release MR
 * @copy_last: do a separate copy of the last 8 bytes
 */
void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
		  void *data, u32 length,
		  bool release, bool copy_last)
{
	struct rvt_sge *sge = &ss->sge;
	int i;
	bool in_last = false;
	bool cacheless_copy = false;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	struct rvt_wss *wss = rdi->wss;
	unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;

	if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
		cacheless_copy = length >= PAGE_SIZE;
	} else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
		if (length >= PAGE_SIZE) {
			/*
			 * NOTE: this *assumes*:
			 * o The first vaddr is the dest.
			 * o If multiple pages, then vaddr is sequential.
			 */
			wss_insert(wss, sge->vaddr);
			if (length >= (2 * PAGE_SIZE))
				wss_insert(wss, (sge->vaddr + PAGE_SIZE));

			cacheless_copy = wss_exceeds_threshold(wss);
		} else {
			wss_advance_clean_counter(wss);
		}
	}

	if (copy_last) {
		if (length > 8) {
			length -= 8;
		} else {
			copy_last = false;
			in_last = true;
		}
	}

again:
	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		if (unlikely(in_last)) {
			/* enforce byte transfer ordering */
			for (i = 0; i < len; i++)
				((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
		} else if (cacheless_copy) {
			cacheless_memcpy(sge->vaddr, data, len);
		} else {
			memcpy(sge->vaddr, data, len);
		}
		rvt_update_sge(ss, len, release);
		data += len;
		length -= len;
	}

	if (copy_last) {
		copy_last = false;
		in_last = true;
		length = 8;
		goto again;
	}
}
EXPORT_SYMBOL(rvt_copy_sge);
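/*
 * Example of the copy_last contract: for RDMA WRITEs landing in user
 * memory, rvt_ruc_loopback() below passes copy_last = rvt_is_user_qp(qp)
 * so the final 8 bytes are copied byte-by-byte after the bulk of the
 * payload; a consumer polling on the tail of the message therefore
 * cannot observe it before the rest has been written.
 */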
static enum ib_wc_status loopback_qp_drop(struct rvt_ibport *rvp,
					  struct rvt_qp *sqp)
{
	rvp->n_pkt_drops++;
	/*
	 * For RC, the requester would timeout and retry so
	 * shortcut the timeouts and just signal too many retries.
	 */
	return sqp->ibqp.qp_type == IB_QPT_RC ?
		IB_WC_RETRY_EXC_ERR : IB_WC_SUCCESS;
}
/**
 * rvt_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
void rvt_ruc_loopback(struct rvt_qp *sqp)
{
	struct rvt_ibport *rvp = NULL;
	struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	bool release;
	int ret;
	bool copy_last = false;
	int local_ops = 0;

	rcu_read_lock();
	rvp = rdi->ports[sqp->port_num - 1];

	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
			    sqp->remote_qpn);

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	if (!qp) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr_no_r_lock;
	}
	spin_lock_irqsave(&qp->r_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		send_status = loopback_qp_drop(rvp, sqp);
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = true;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_REG_MR:
		goto send_comp;

	case IB_WR_LOCAL_INV:
		if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
			if (rvt_invalidate_rkey(sqp,
						wqe->wr.ex.invalidate_rkey))
				send_status = IB_WC_LOC_PROT_ERR;
			local_ops = 1;
		}
		goto send_comp;

	case IB_WR_SEND_WITH_INV:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_INV:
			if (!rvt_invalidate_rkey(qp,
						 wqe->wr.ex.invalidate_rkey)) {
				wc.wc_flags = IB_WC_WITH_INVALIDATE;
				wc.ex.invalidate_rkey =
					wqe->wr.ex.invalidate_rkey;
			}
			break;
		case IB_WR_SEND_WITH_IMM:
			wc.wc_flags = IB_WC_WITH_IMM;
			wc.ex.imm_data = wqe->wr.ex.imm_data;
			break;
		default:
			break;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* skip copy_last set and qp_access_flags recheck */
		goto do_write;
	case IB_WR_RDMA_WRITE:
		copy_last = rvt_is_user_qp(qp);
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
do_write:
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = false;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
		*(u64 *)sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64)atomic64_add_return(sdata, maddr) - sdata :
			(u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
				     sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = rvt_get_sge_length(sge, sqp->s_len);

		WARN_ON_ONCE(len == 0);
		rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
			     len, release, copy_last);
		rvt_update_sge(&sqp->s_sge, len, !release);
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_recv_cq(qp, &wc, wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	rvp->n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	spin_lock(&sqp->r_lock);
	rvt_send_complete(sqp, wqe, send_status);
	spin_unlock(&sqp->r_lock);
	if (local_ops) {
		atomic_dec(&sqp->local_ops_pending);
		local_ops = 0;
	}
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	rvp->n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_unlock_irqrestore(&qp->r_lock, flags);
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
			       IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_unlock_irqrestore(&qp->r_lock, flags);
serr_no_r_lock:
	spin_lock_irqsave(&sqp->s_lock, flags);
	spin_lock(&sqp->r_lock);
	rvt_send_complete(sqp, wqe, send_status);
	spin_unlock(&sqp->r_lock);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe;

		spin_lock(&sqp->r_lock);
		lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
		spin_unlock(&sqp->r_lock);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_ruc_loopback);