/*
 * Copyright(c) 2016, 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type);
/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 * The comment after each entry gives the value in milliseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10,     /* 01: 0.01 */
        20,     /* 02: 0.02 */
        30,     /* 03: 0.03 */
        40,     /* 04: 0.04 */
        60,     /* 05: 0.06 */
        80,     /* 06: 0.08 */
        120,    /* 07: 0.12 */
        160,    /* 08: 0.16 */
        240,    /* 09: 0.24 */
        320,    /* 0A: 0.32 */
        480,    /* 0B: 0.48 */
        640,    /* 0C: 0.64 */
        960,    /* 0D: 0.96 */
        1280,   /* 0E: 1.28 */
        1920,   /* 0F: 1.92 */
        2560,   /* 10: 2.56 */
        3840,   /* 11: 3.84 */
        5120,   /* 12: 5.12 */
        7680,   /* 13: 7.68 */
        10240,  /* 14: 10.24 */
        15360,  /* 15: 15.36 */
        20480,  /* 16: 20.48 */
        30720,  /* 17: 30.72 */
        40960,  /* 18: 40.96 */
        61440,  /* 19: 61.44 */
        81920,  /* 1A: 81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};
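/*
 * Worked example (illustrative only, not used by the driver): RNR NAK
 * code 0x1f indexes the last entry above:
 *
 *      u32 usec = ib_rvt_rnr_table[0x1f & IB_AETH_CREDIT_MASK];
 *
 * which yields 491520, i.e. the requester backs off for 491.52 msec
 * before retrying. IB_AETH_CREDIT_MASK (from rdma/ib_hdrs.h) keeps the
 * index within the 32-entry table.
 */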
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = 0,
        [IB_QPS_INIT] = RVT_POST_RECV_OK,
        [IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
        [IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
            RVT_PROCESS_NEXT_SEND_OK,
        [IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
        [IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
        [IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
            RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
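/*
 * Minimal usage sketch: callers gate work on this table before touching
 * a QP, exactly as the post paths below do, e.g.
 *
 *      if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *              return -EINVAL;
 */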
static void get_map_page(struct rvt_qpn_table *qpt,
                         struct rvt_qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        /*
         * Free the page if someone raced with us installing it.
         */
        spin_lock(&qpt->lock);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock(&qpt->lock);
}
/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt dev struct
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
        u32 offset, i;
        struct rvt_qpn_map *map;
        int ret = 0;

        if (rdi->dparms.qpn_res_end < rdi->dparms.qpn_res_start)
                return -EINVAL;

        spin_lock_init(&qpt->lock);

        qpt->last = rdi->dparms.qpn_start;
        qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

        /*
         * Drivers may want some QPs beyond what we need for verbs; let them
         * use our QPN table rather than maintaining a second one. Go ahead
         * and mark the bitmaps for those QPNs here. The reserved range must
         * be *after* the range which verbs uses.
         */

        /* Figure out number of bit maps needed before reserved range */
        qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

        /* This should always be zero */
        offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

        /* Starting with the first reserved bit map */
        map = &qpt->map[qpt->nmaps];

        rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
                    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
        for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
                if (!map->page) {
                        get_map_page(qpt, map);
                        if (!map->page) {
                                ret = -ENOMEM;
                                break;
                        }
                }
                set_bit(offset, map->page);
                offset++;
                if (offset == RVT_BITS_PER_PAGE) {
                        /* next page */
                        qpt->nmaps++;
                        map++;
                        offset = 0;
                }
        }
        return ret;
}
/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
                free_page((unsigned long)qpt->map[i].page);
}
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
        int i;
        int ret = -ENOMEM;

        if (!rdi->dparms.qp_table_size)
                return -EINVAL;

        /*
         * If driver is not doing any QP allocation then make sure it is
         * providing the necessary QP functions.
         */
        if (!rdi->driver_f.free_all_qps ||
            !rdi->driver_f.qp_priv_alloc ||
            !rdi->driver_f.qp_priv_free ||
            !rdi->driver_f.notify_qp_reset ||
            !rdi->driver_f.notify_restart_rc)
                return -EINVAL;

        /* allocate parent object */
        rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
                                   rdi->dparms.node);
        if (!rdi->qp_dev)
                return -ENOMEM;

        /* allocate hash table */
        rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
        rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
        rdi->qp_dev->qp_table =
                kmalloc_array_node(rdi->qp_dev->qp_table_size,
                                   sizeof(*rdi->qp_dev->qp_table),
                                   GFP_KERNEL, rdi->dparms.node);
        if (!rdi->qp_dev->qp_table)
                goto no_qp_table;

        for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
                RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

        spin_lock_init(&rdi->qp_dev->qpt_lock);

        /* initialize qpn map */
        if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
                goto fail_table;

        spin_lock_init(&rdi->n_qps_lock);

        return 0;

fail_table:
        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
        kfree(rdi->qp_dev);

        return ret;
}
/**
 * rvt_free_qp_cb - callback function to reset a qp
 * @qp: the qp to reset
 * @v: a 64-bit value
 *
 * This function resets the qp and removes it from the
 * qp hash table.
 */
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
        unsigned int *qp_inuse = (unsigned int *)v;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        /* Reset the qp and remove it from the qp hash list */
        rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);

        /* Increment the qp_inuse count */
        (*qp_inuse)++;
}
/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 *
 * Return: the number of QPs still in use.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
        unsigned int qp_inuse = 0;

        qp_inuse += rvt_mcast_tree_empty(rdi);

        rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);

        return qp_inuse;
}
/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
        u32 qps_inuse = rvt_free_all_qps(rdi);

        if (qps_inuse)
                rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
                           qps_inuse);
        if (!rdi->qp_dev)
                return;

        kfree(rdi->qp_dev->qp_table);
        free_qpn_table(&rdi->qp_dev->qpn_table);
        kfree(rdi->qp_dev);
}
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
                              struct rvt_qpn_map *map, unsigned off)
{
        return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
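/*
 * Worked example (illustrative only): a QPN is simply a linear bit index
 * across the map pages, so for the third map page (index 2) and bit
 * offset 5,
 *
 *      qpn = 2 * RVT_BITS_PER_PAGE + 5;
 *
 * and conversely qpn / RVT_BITS_PER_PAGE recovers the page while
 * qpn & RVT_BITS_PER_PAGE_MASK recovers the offset, as alloc_qpn() and
 * rvt_free_qpn() below rely on.
 */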
/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *             IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                     enum ib_qp_type type, u8 port_num)
{
        u32 i, offset, max_scan, qpn;
        struct rvt_qpn_map *map;
        u32 ret;

        if (rdi->driver_f.alloc_qpn)
                return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

        if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
                unsigned n;

                ret = type == IB_QPT_GSI;
                n = 1 << (ret + 2 * (port_num - 1));
                spin_lock(&qpt->lock);
                if (qpt->flags & n)
                        ret = -EINVAL;
                else
                        qpt->flags |= n;
                spin_unlock(&qpt->lock);
                goto bail;
        }
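        /*
         * The bit math above packs the special QPNs for every port into
         * the low bits of qpt->flags: SMI for port p (1 based) claims bit
         * 2 * (p - 1) and GSI claims bit 2 * (p - 1) + 1. For example,
         * port 1 yields n == 0x1 (QP0) or n == 0x2 (QP1), and port 2
         * yields n == 0x4 or n == 0x8.
         */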
        qpn = qpt->last + qpt->incr;
        if (qpn >= RVT_QPN_MAX)
                qpn = qpt->incr | ((qpt->last & 1) ^ 1);
        /* offset carries bit 0 */
        offset = qpn & RVT_BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                do {
                        if (!test_and_set_bit(offset, map->page)) {
                                qpt->last = qpn;
                                ret = qpn;
                                goto bail;
                        }
                        offset += qpt->incr;
                        /*
                         * This qpn might be bogus if offset >= BITS_PER_PAGE.
                         * That is OK. It gets re-assigned below.
                         */
                        qpn = mk_qpn(qpt, map, offset);
                } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        /* start at incr with current bit 0 */
                        offset = qpt->incr | (offset & 1);
                } else {
                        map = &qpt->map[0];
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
                /* there can be no set bits in low-order QoS bits */
                WARN_ON(rdi->dparms.qos_shift > 1 &&
                        offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: If the send side should be cleared or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
        unsigned n;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
                rvt_put_ss(&qp->s_rdma_read_sge);

        rvt_put_ss(&qp->r_sge);

        if (clr_sends) {
                while (qp->s_last != qp->s_head) {
                        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

                        rvt_put_swqe(wqe);

                        if (qp->ibqp.qp_type == IB_QPT_UD ||
                            qp->ibqp.qp_type == IB_QPT_SMI ||
                            qp->ibqp.qp_type == IB_QPT_GSI)
                                atomic_dec(&ibah_to_rvtah(
                                                wqe->ud_wr.ah)->refcount);
                        if (++qp->s_last >= qp->s_size)
                                qp->s_last = 0;
                        smp_wmb(); /* see qp_set_savail */
                }
                if (qp->s_rdma_mr) {
                        rvt_put_mr(qp->s_rdma_mr);
                        qp->s_rdma_mr = NULL;
                }
        }

        for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[n];

                if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
        }
}
/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe: the send wqe
 * @lkey: the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
        int i;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct rvt_sge *sge = &wqe->sg_list[i];

                if (rvt_mr_has_lkey(sge->mr, lkey))
                        return true;
        }
        return false;
}
/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp: the rvt_qp
 * @lkey: the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        u32 s_last = qp->s_last;

        while (s_last != qp->s_head) {
                struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

                if (rvt_swqe_has_lkey(wqe, lkey))
                        return true;

                if (++s_last >= qp->s_size)
                        s_last = 0;
        }
        if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
                return true;
        return false;
}
/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp: the qp
 * @lkey: the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
        int i;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
                struct rvt_ack_entry *e = &qp->s_ack_queue[i];

                if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
                        return true;
        }
        return false;
}
/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
        bool lastwqe = false;

        if (qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                /* avoid special QPs */
                return;
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto check_lwqe;

        if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
            rvt_qp_sends_has_lkey(qp, lkey) ||
            rvt_qp_acks_has_lkey(qp, lkey))
                lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}
/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
        unsigned long flags;
        int removed = 1;

        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (rcu_dereference_protected(rvp->qp[0],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[0], NULL);
        } else if (rcu_dereference_protected(rvp->qp[1],
                        lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
                RCU_INIT_POINTER(rvp->qp[1], NULL);
        } else {
                struct rvt_qp *q;
                struct rvt_qp __rcu **qpp;

                removed = 0;
                qpp = &rdi->qp_dev->qp_table[n];
                for (; (q = rcu_dereference_protected(*qpp,
                        lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
                        qpp = &q->next) {
                        if (q == qp) {
                                RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                        lockdep_is_held(&rdi->qp_dev->qpt_lock)));
                                removed = 1;
                                trace_rvt_qpremove(qp, n);
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
        if (removed) {
                synchronize_rcu();
                rvt_put_qp(qp);
        }
}
/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev struct
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp(). The difference is that the reset path
 * takes the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                        enum ib_qp_type type)
{
        qp->qp_access_flags = 0;
        qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
        qp->s_sending_psn = 0;
        qp->s_sending_hpsn = 0;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
        qp->r_sge.num_sge = 0;
        atomic_set(&qp->s_reserved_used, 0);
}
/**
 * _rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                          enum ib_qp_type type)
        __must_hold(&qp->s_lock)
        __must_hold(&qp->s_hlock)
        __must_hold(&qp->r_lock)
{
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;

                /* Let drivers flush their waitlist */
                rdi->driver_f.flush_qp_waiters(qp);
                rvt_stop_rc_timers(qp);
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
                spin_unlock(&qp->s_lock);
                spin_unlock(&qp->s_hlock);
                spin_unlock_irq(&qp->r_lock);

                /* Stop the send queue and the retry timer */
                rdi->driver_f.stop_send_queue(qp);
                rvt_del_timers_sync(qp);
                /* Wait for things to stop */
                rdi->driver_f.quiesce_qp(qp);

                /* take qp out of the hash and wait for it to be unused */
                rvt_remove_qp(rdi, qp);

                /* grab the lock b/c it was locked at call time */
                spin_lock_irq(&qp->r_lock);
                spin_lock(&qp->s_hlock);
                spin_lock(&qp->s_lock);

                rvt_clear_mr_refs(qp, 1);
                /*
                 * Let the driver do any tear down or re-init it needs to for
                 * a qp that has been reset.
                 */
                rdi->driver_f.notify_qp_reset(qp);
        }
        rvt_init_qp(rdi, qp, type);
        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_hlock);
        lockdep_assert_held(&qp->s_lock);
}
/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: the device info
 * @qp: the QP to reset
 * @type: the QP type
 *
 * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
 * before calling _rvt_reset_qp().
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                         enum ib_qp_type type)
{
        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);
        _rvt_reset_qp(rdi, qp, type);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
}
/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
        struct rvt_qpn_map *map;

        map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a reserved
 * range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata)
{
        struct rvt_qp *qp;
        int err;
        struct rvt_swqe *swq = NULL;
        size_t sz;
        size_t sg_list_sz;
        struct ib_qp *ret = ERR_PTR(-ENOMEM);
        struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
        void *priv = NULL;
        size_t sqsize;

        if (!rdi)
                return ERR_PTR(-EINVAL);

        if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
            init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
            init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge >
                    rdi->dparms.props.max_recv_sge ||
                    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
                        return ERR_PTR(-EINVAL);

                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0)
                        return ERR_PTR(-EINVAL);
        }
        sqsize =
                init_attr->cap.max_send_wr + 1 +
                rdi->dparms.reserved_operations;
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                if (init_attr->port_num == 0 ||
                    init_attr->port_num > ibpd->device->phys_port_cnt)
                        return ERR_PTR(-EINVAL);
                /* fall through */
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
                sz = sizeof(struct rvt_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct rvt_swqe);
                swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
                if (!swq)
                        return ERR_PTR(-ENOMEM);
                sz = sizeof(*qp);
                sg_list_sz = 0;
                if (init_attr->srq) {
                        struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
                                  rdi->dparms.node);
                if (!qp)
                        goto bail_swq;

                RCU_INIT_POINTER(qp->next, NULL);
                if (init_attr->qp_type == IB_QPT_RC) {
                        qp->s_ack_queue =
                                kcalloc_node(rvt_max_atomic(rdi),
                                             sizeof(*qp->s_ack_queue),
                                             GFP_KERNEL,
                                             rdi->dparms.node);
                        if (!qp->s_ack_queue)
                                goto bail_qp;
                        /* initialize timers needed for rc qp */
                        timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
                        hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
                                     HRTIMER_MODE_REL);
                        qp->s_rnr_timer.function = rvt_rc_rnr_retry;
                }
                /*
                 * Driver needs to set up its private QP structure and do any
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
                if (IS_ERR(priv)) {
                        ret = priv;
                        goto bail_qp;
                }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
                                         1000UL);
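                /*
                 * Worked example (illustrative only): the IBTA local ACK
                 * timeout is 4.096 usec * 2^timeout, i.e. 4096 nsec *
                 * (1 << timeout), so for qp->timeout == 14 the expression
                 * above is 4096 * 16384 / 1000 ~= 67109 usec, about
                 * 67 msec, before the conversion to jiffies.
                 */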
                if (init_attr->srq) {
                        sz = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct rvt_rwqe);
                        if (udata)
                                qp->r_rq.wq = vmalloc_user(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz);
                        else
                                qp->r_rq.wq = vzalloc_node(
                                                sizeof(struct rvt_rwq) +
                                                qp->r_rq.size * sz,
                                                rdi->dparms.node);
                        if (!qp->r_rq.wq)
                                goto bail_driver_priv;
                }
                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->r_lock);
                spin_lock_init(&qp->s_hlock);
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                atomic_set(&qp->local_ops_pending, 0);
                init_waitqueue_head(&qp->wait);
                INIT_LIST_HEAD(&qp->rspwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = sqsize;
                qp->s_avail = init_attr->cap.max_send_wr;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = RVT_S_SIGNAL_REQ_WR;

                err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
                                init_attr->qp_type,
                                init_attr->port_num);
                if (err < 0) {
                        ret = ERR_PTR(err);
                        goto bail_rq_wq;
                }
                qp->ibqp.qp_num = err;
                qp->port_num = init_attr->port_num;
                rvt_init_qp(rdi, qp, init_attr->qp_type);
                break;
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-EINVAL);
        }

        init_attr->cap.max_inline_data = 0;
        /*
         * Return the address of the RWQ as the offset to mmap.
         * See rvt_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_qpn;
                        }
                } else {
                        u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

                        qp->ip = rvt_create_mmap_info(rdi, s,
                                                      ibpd->uobject->context,
                                                      qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qpn;
                        }

                        err = ib_copy_to_udata(udata, &qp->ip->offset,
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
                qp->pid = current->pid;
        }
        spin_lock(&rdi->n_qps_lock);
        if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
                spin_unlock(&rdi->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        rdi->n_qps_allocated++;

        /*
         * Maintain a busy_jiffies variable that will be added to the timeout
         * period in mod_retry_timer and add_retry_timer. This busy jiffies
         * is scaled by the number of rc qps created for the device to reduce
         * the number of timeouts occurring when there is a large number of
         * qps. busy_jiffies is incremented every rc qp scaling interval.
         * The scaling interval is selected based on extensive performance
         * evaluation of targeted workloads.
         */
        if (init_attr->qp_type == IB_QPT_RC) {
                rdi->n_rc_qps++;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);
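        /*
         * Worked example (illustrative only): if RC_QP_SCALING_INTERVAL
         * were 50, then 150 RC QPs would give busy_jiffies == 3, i.e.
         * three extra jiffies added to every RC retry timeout.
         */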
        if (qp->ip) {
                spin_lock_irq(&rdi->pending_lock);
                list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
                spin_unlock_irq(&rdi->pending_lock);
        }

        ret = &qp->ibqp;

        /*
         * We have our QP and it's good. Now keep track of what types of
         * opcodes can be processed on this QP. We do this by keeping track
         * of what the 3 high order bits of the opcode are.
         */
        switch (init_attr->qp_type) {
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        case IB_QPT_UD:
                qp->allowed_ops = IB_OPCODE_UD;
                break;
        case IB_QPT_RC:
                qp->allowed_ops = IB_OPCODE_RC;
                break;
        case IB_QPT_UC:
                qp->allowed_ops = IB_OPCODE_UC;
                break;
        default:
                ret = ERR_PTR(-EINVAL);
                goto bail_ip;
        }

        return ret;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
        if (!qp->ip)
                vfree(qp->r_rq.wq);

bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
        kfree(qp->s_ack_queue);
        kfree(qp);

bail_swq:
        vfree(swq);

        return ret;
}
/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 *
 * Return: true if last WQE event should be generated.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
        struct ib_wc wc;
        int ret = 0;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        lockdep_assert_held(&qp->r_lock);
        lockdep_assert_held(&qp->s_lock);
        if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
                goto bail;

        qp->state = IB_QPS_ERR;

        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
                del_timer(&qp->s_timer);
        }

        if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
                qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

        rdi->driver_f.notify_error_qp(qp);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (READ_ONCE(qp->s_last) != qp->s_head)
                rdi->driver_f.schedule_send(qp);

        rvt_clear_mr_refs(qp, 0);

        memset(&wc, 0, sizeof(wc));
        wc.qp = &qp->ibqp;
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct rvt_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler) {
                ret = 1;
        }

bail:
        return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
        struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
        unsigned long flags;

        rvt_get_qp(qp);
        spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

        if (qp->ibqp.qp_num <= 1) {
                rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
        } else {
                u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

                qp->next = rdi->qp_dev->qp_table[n];
                rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
                trace_rvt_qpinsert(qp, n);
        }

        spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                  int attr_mask, struct ib_udata *udata)
{
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        struct ib_event ev;
        int lastwqe = 0;
        int mig = 0;
        int pmtu = 0; /* for gcc warning only */
        enum rdma_link_layer link;
        int opa_ah;

        link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

        spin_lock_irq(&qp->r_lock);
        spin_lock(&qp->s_hlock);
        spin_lock(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
        opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask, link))
                goto inval;

        if (rdi->driver_f.check_modify_qp &&
            rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
                goto inval;
        if (attr_mask & IB_QP_AV) {
                if (opa_ah) {
                        if (rdma_ah_get_dlid(&attr->ah_attr) >=
                            opa_get_mcast_base(OPA_MCAST_NR))
                                goto inval;
                } else {
                        if (rdma_ah_get_dlid(&attr->ah_attr) >=
                            be16_to_cpu(IB_MULTICAST_LID_BASE))
                                goto inval;
                }
                if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
                        goto inval;
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                if (opa_ah) {
                        if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
                            opa_get_mcast_base(OPA_MCAST_NR))
                                goto inval;
                } else {
                        if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
                            be16_to_cpu(IB_MULTICAST_LID_BASE))
                                goto inval;
                }
                if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
                        goto inval;
                if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;
        }
        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= rvt_get_npkeys(rdi))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (qp->ibqp.qp_type == IB_QPT_SMI ||
                    qp->ibqp.qp_type == IB_QPT_GSI ||
                    attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        if (attr_mask & IB_QP_DEST_QPN)
                if (attr->dest_qp_num > RVT_QPN_MASK)
                        goto inval;

        if (attr_mask & IB_QP_RETRY_CNT)
                if (attr->retry_cnt > 7)
                        goto inval;

        if (attr_mask & IB_QP_RNR_RETRY)
                if (attr->rnr_retry > 7)
                        goto inval;
        /*
         * Don't allow invalid path_mtu values. OK to set greater
         * than the active mtu (or even the max_cap, if we have tuned
         * that to a small mtu). We'll set qp->path_mtu
         * to the lesser of requested attribute mtu and active,
         * for packetizing messages.
         * Note that the QP port has to be set in INIT and MTU in RTR.
         */
        if (attr_mask & IB_QP_PATH_MTU) {
                pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
                if (pmtu < 0)
                        goto inval;
        }
        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                if (attr->path_mig_state == IB_MIG_REARM) {
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                goto inval;
                        if (new_state != IB_QPS_RTS)
                                goto inval;
                } else if (attr->path_mig_state == IB_MIG_MIGRATED) {
                        if (qp->s_mig_state == IB_MIG_REARM)
                                goto inval;
                        if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
                                goto inval;
                        if (qp->s_mig_state == IB_MIG_ARMED)
                                mig = 1;
                } else {
                        goto inval;
                }
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
                        goto inval;
        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET)
                        _rvt_reset_qp(rdi, qp, ibqp->qp_type);
                break;

        case IB_QPS_RTR:
                /* Allow event to re-trigger if QP set to RTR more than once */
                qp->r_flags &= ~RVT_R_COMM_EST;
                qp->state = new_state;
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }
        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_PORT)
                qp->port_num = attr->port_num;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
                qp->s_psn = qp->s_next_psn;
                qp->s_sending_psn = qp->s_next_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
                qp->s_sending_hpsn = qp->s_last_psn;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                rdma_replace_ah_attr(&qp->remote_ah_attr, &attr->ah_attr);
                qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
                qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
        }

        if (attr_mask & IB_QP_ALT_PATH) {
                rdma_replace_ah_attr(&qp->alt_ah_attr, &attr->alt_ah_attr);
                qp->s_alt_pkey_index = attr->alt_pkey_index;
        }

        if (attr_mask & IB_QP_PATH_MIG_STATE) {
                qp->s_mig_state = attr->path_mig_state;
                if (mig) {
                        qp->remote_ah_attr = qp->alt_ah_attr;
                        qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
                        qp->s_pkey_index = qp->s_alt_pkey_index;
                }
        }

        if (attr_mask & IB_QP_PATH_MTU) {
                qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
                qp->log_pmtu = ilog2(qp->pmtu);
        }

        if (attr_mask & IB_QP_RETRY_CNT) {
                qp->s_retry_cnt = attr->retry_cnt;
                qp->s_retry = attr->retry_cnt;
        }

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry_cnt = attr->rnr_retry;
                qp->s_rnr_retry = attr->rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT) {
                qp->timeout = attr->timeout;
                qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
        }

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        if (rdi->driver_f.modify_qp)
                rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);

        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                rvt_insert_qp(rdi, qp);

        if (lastwqe) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        if (mig) {
                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_PATH_MIG;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        return 0;

inval:
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
        return -EINVAL;
}
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        rvt_reset_qp(rdi, qp, ibqp->qp_type);

        wait_event(qp->wait, !atomic_read(&qp->refcount));
        /* qpn is now available for use again */
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

        spin_lock(&rdi->n_qps_lock);
        rdi->n_qps_allocated--;
        if (qp->ibqp.qp_type == IB_QPT_RC) {
                rdi->n_rc_qps--;
                rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
        }
        spin_unlock(&rdi->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, rvt_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        rdi->driver_f.qp_priv_free(rdi, qp);
        kfree(qp->s_ack_queue);
        rdma_destroy_ah_attr(&qp->remote_ah_attr);
        rdma_destroy_ah_attr(&qp->alt_ah_attr);
        kfree(qp);
        return 0;
}
/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                 int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
        attr->path_mig_state = qp->s_mig_state;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
        attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1 -
                rdi->dparms.reserved_operations;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        attr->alt_ah_attr = qp->alt_ah_attr;
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = qp->s_alt_pkey_index;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = qp->port_num;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
        attr->alt_timeout = qp->alt_timeout;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = qp->port_num;
        return 0;
}
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                  const struct ib_recv_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_rwq *wq = qp->r_rq.wq;
        unsigned long flags;
        int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
                           !qp->ibqp.srq;

        /* Check that state is OK to post receive. */
        if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
                *bad_wr = wr;
                return -EINVAL;
        }

        for (; wr; wr = wr->next) {
                struct rvt_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
                        *bad_wr = wr;
                        return -EINVAL;
                }

                spin_lock_irqsave(&qp->r_rq.lock, flags);
                next = wq->head + 1;
                if (next >= qp->r_rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&qp->r_rq.lock, flags);
                        *bad_wr = wr;
                        return -ENOMEM;
                }
                if (unlikely(qp_err_flush)) {
                        struct ib_wc wc;

                        memset(&wc, 0, sizeof(wc));
                        wc.qp = &qp->ibqp;
                        wc.opcode = IB_WC_RECV;
                        wc.wr_id = wr->wr_id;
                        wc.status = IB_WC_WR_FLUSH_ERR;
                        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
                } else {
                        wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
                        wqe->wr_id = wr->wr_id;
                        wqe->num_sge = wr->num_sge;
                        for (i = 0; i < wr->num_sge; i++)
                                wqe->sg_list[i] = wr->sg_list[i];
                        /*
                         * Make sure queue entry is written
                         * before the head index.
                         */
                        smp_wmb();
                        wq->head = next;
                }
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }
        return 0;
}
/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp: the qp
 * @post_parms: the post send table for the driver
 * @wr: the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr. Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
        struct rvt_qp *qp,
        const struct rvt_operation_params *post_parms,
        const struct ib_send_wr *wr)
{
        int len;

        if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
                return -EINVAL;
        if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
                return -EINVAL;
        if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
            ibpd_to_rvtpd(qp->ibqp.pd)->user)
                return -EINVAL;
        if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
            (wr->num_sge == 0 ||
             wr->sg_list[0].length < sizeof(u64) ||
             wr->sg_list[0].addr & (sizeof(u64) - 1)))
                return -EINVAL;
        if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
            !qp->s_max_rd_atomic)
                return -EINVAL;
        len = post_parms[wr->opcode].length;
        /* UD specific */
        if (qp->ibqp.qp_type != IB_QPT_UC &&
            qp->ibqp.qp_type != IB_QPT_RC) {
                if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
                        return -EINVAL;
                len = sizeof(struct ib_ud_wr);
        }
        return len;
}
/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * The return value is zero or a -ENOMEM.
 */
static inline int rvt_qp_is_avail(
        struct rvt_qp *qp,
        struct rvt_dev_info *rdi,
        bool reserved_op)
{
        u32 slast;
        u32 avail;
        u32 reserved_used;

        /* see rvt_qp_wqe_unreserve() */
        smp_mb__before_atomic();
        reserved_used = atomic_read(&qp->s_reserved_used);
        if (unlikely(reserved_op)) {
                /* see rvt_qp_wqe_unreserve() */
                smp_mb__before_atomic();
                if (reserved_used >= rdi->dparms.reserved_operations)
                        return -ENOMEM;
                return 0;
        }
        /* non-reserved operations */
        if (likely(qp->s_avail))
                return 0;
        slast = READ_ONCE(qp->s_last);
        if (qp->s_head >= slast)
                avail = qp->s_size - (qp->s_head - slast);
        else
                avail = slast - qp->s_head;

        /* see rvt_qp_wqe_unreserve() */
        smp_mb__before_atomic();
        reserved_used = atomic_read(&qp->s_reserved_used);
        avail = avail - 1 -
                (rdi->dparms.reserved_operations - reserved_used);
        /* ensure we don't assign a negative s_avail */
        if ((s32)avail <= 0)
                return -ENOMEM;
        qp->s_avail = avail;
        if (WARN_ON(qp->s_avail >
                    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
                rvt_pr_err(rdi,
                           "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
                           qp->ibqp.qp_num, qp->s_size, qp->s_avail,
                           qp->s_head, qp->s_tail, qp->s_cur,
                           qp->s_acked, qp->s_last);
        return 0;
}
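/*
 * Worked example of the ring math above (illustrative only): with
 * s_size == 16, s_head == 14, s_last == 3 and no reserved operations,
 * head >= last so avail = 16 - (14 - 3) = 5, minus 1 for the ring's
 * always-empty slot, leaving qp->s_avail == 4 postable entries.
 */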
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: whether the caller should kick the send engine
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
                           const struct ib_send_wr *wr,
                           bool *call_send)
{
        struct rvt_swqe *wqe;
        u32 next;
        int i;
        int j;
        int acc;
        struct rvt_lkey_table *rkt;
        struct rvt_pd *pd;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
        u8 log_pmtu;
        int ret;
        size_t cplen;
        bool reserved_op;
        int local_ops_delayed = 0;

        BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

        /* IB spec says that num_sge == 0 is OK. */
        if (unlikely(wr->num_sge > qp->s_max_sge))
                return -EINVAL;

        ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
        if (ret < 0)
                return ret;
        cplen = ret;

        /*
         * Local operations include fast register and local invalidate.
         * Fast register needs to be processed immediately because the
         * registered lkey may be used by following work requests and the
         * lkey needs to be valid at the time those requests are posted.
         * Local invalidate can be processed immediately if fencing is
         * not required and no previous local invalidate ops are pending.
         * Signaled local operations that have been processed immediately
         * need to have requests with "completion only" flags set posted
         * to the send queue in order to generate completions.
         */
        if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
                switch (wr->opcode) {
                case IB_WR_REG_MR:
                        ret = rvt_fast_reg_mr(qp,
                                              reg_wr(wr)->mr,
                                              reg_wr(wr)->key,
                                              reg_wr(wr)->access);
                        if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
                                return ret;
                        break;
                case IB_WR_LOCAL_INV:
                        if ((wr->send_flags & IB_SEND_FENCE) ||
                            atomic_read(&qp->local_ops_pending)) {
                                local_ops_delayed = 1;
                        } else {
                                ret = rvt_invalidate_rkey(
                                        qp, wr->ex.invalidate_rkey);
                                if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
                                        return ret;
                        }
                        break;
                default:
                        return -EINVAL;
                }
        }
        reserved_op = rdi->post_parms[wr->opcode].flags &
                        RVT_OPERATION_USE_RESERVE;
        /* check for avail */
        ret = rvt_qp_is_avail(qp, rdi, reserved_op);
        if (ret)
                return ret;
        next = qp->s_head + 1;
        if (next >= qp->s_size)
                next = 0;

        rkt = &rdi->lkey_table;
        pd = ibpd_to_rvtpd(qp->ibqp.pd);
        wqe = rvt_get_swqe_ptr(qp, qp->s_head);

        /* cplen has length from above */
        memcpy(&wqe->wr, wr, cplen);

        wqe->length = 0;
        j = 0;
        if (wr->num_sge) {
                struct rvt_sge *last_sge = NULL;

                acc = wr->opcode >= IB_WR_RDMA_READ ?
                        IB_ACCESS_LOCAL_WRITE : 0;
                for (i = 0; i < wr->num_sge; i++) {
                        u32 length = wr->sg_list[i].length;

                        if (length == 0)
                                continue;
                        ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
                                          &wr->sg_list[i], acc);
                        if (unlikely(ret < 0))
                                goto bail_inval_free;
                        wqe->length += length;
                        if (ret)
                                last_sge = &wqe->sg_list[j];
                        j += ret;
                }
                wqe->wr.num_sge = j;
        }
        /* general part of wqe valid - allow for driver checks */
        if (rdi->driver_f.check_send_wqe) {
                ret = rdi->driver_f.check_send_wqe(qp, wqe);
                if (ret < 0)
                        goto bail_inval_free;
        }

        log_pmtu = qp->log_pmtu;
        if (qp->ibqp.qp_type != IB_QPT_UC &&
            qp->ibqp.qp_type != IB_QPT_RC) {
                struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

                log_pmtu = ah->log_pmtu;
                atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
        }

        if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
                if (local_ops_delayed)
                        atomic_inc(&qp->local_ops_pending);
                else
                        wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
                wqe->ssn = 0;
                wqe->psn = 0;
                wqe->lpsn = 0;
        } else {
                wqe->ssn = qp->s_ssn++;
                wqe->psn = qp->s_next_psn;
                wqe->lpsn = wqe->psn +
                                (wqe->length ?
                                        ((wqe->length - 1) >> log_pmtu) :
                                        0);
                qp->s_next_psn = wqe->lpsn + 1;
        }
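        /*
         * Worked example (illustrative only): an 8192 byte send on a QP
         * with a 2048 byte path MTU (log_pmtu == 11) spans
         * (8192 - 1) >> 11 == 3 additional packets, so lpsn = psn + 3 and
         * the next request starts at psn + 4.
         */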
        if (unlikely(reserved_op)) {
                wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
                rvt_qp_wqe_reserve(qp, wqe);
        } else {
                wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
                qp->s_avail--;
        }
        trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
        smp_wmb(); /* see request builders */
        qp->s_head = next;

        return 0;

bail_inval_free:
        /* release mr holds */
        while (j) {
                struct rvt_sge *sge = &wqe->sg_list[--j];

                rvt_put_mr(sge->mr);
        }
        return ret;
}
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                  const struct ib_send_wr **bad_wr)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        unsigned long flags = 0;
        bool call_send;
        unsigned nreq = 0;
        int err = 0;

        spin_lock_irqsave(&qp->s_hlock, flags);

        /*
         * Ensure QP state is such that we can send. If not, bail out early;
         * there is no need to do this every time we post a send.
         */
        if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
                spin_unlock_irqrestore(&qp->s_hlock, flags);
                *bad_wr = wr;
                return -EINVAL;
        }

        /*
         * If the send queue is empty and we only have a single WR, then just
         * go ahead and kick the send engine into gear. Otherwise we will
         * always just schedule the send to happen later.
         */
        call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next;

        for (; wr; wr = wr->next) {
                err = rvt_post_one_wr(qp, wr, &call_send);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        goto bail;
                }
                nreq++;
        }
bail:
        spin_unlock_irqrestore(&qp->s_hlock, flags);
        if (nreq) {
                if (call_send)
                        rdi->driver_f.do_send(qp);
                else
                        rdi->driver_f.schedule_send_no_lock(qp);
        }
        return err;
}
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr)
{
        struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
        struct rvt_rwq *wq;
        unsigned long flags;

        for (; wr; wr = wr->next) {
                struct rvt_rwqe *wqe;
                u32 next;
                int i;

                if ((unsigned)wr->num_sge > srq->rq.max_sge) {
                        *bad_wr = wr;
                        return -EINVAL;
                }

                spin_lock_irqsave(&srq->rq.lock, flags);
                wq = srq->rq.wq;
                next = wq->head + 1;
                if (next >= srq->rq.size)
                        next = 0;
                if (next == wq->tail) {
                        spin_unlock_irqrestore(&srq->rq.lock, flags);
                        *bad_wr = wr;
                        return -ENOMEM;
                }

                wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
                wqe->wr_id = wr->wr_id;
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
                /* Make sure queue entry is written before the head index. */
                smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
        return 0;
}
/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
{
        int i, j, ret;
        struct ib_wc wc;
        struct rvt_lkey_table *rkt;
        struct rvt_pd *pd;
        struct rvt_sge_state *ss;
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

        rkt = &rdi->lkey_table;
        pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
        ss = &qp->r_sge;
        ss->sg_list = qp->r_sg_list;
        qp->r_len = 0;
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                ret = rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
                                  NULL, &wqe->sg_list[i],
                                  IB_ACCESS_LOCAL_WRITE);
                if (unlikely(ret <= 0))
                        goto bad_lkey;
                qp->r_len += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ss->total_len = qp->r_len;
        return 1;

bad_lkey:
        while (j) {
                struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

                rvt_put_mr(sge->mr);
        }
        ss->num_sge = 0;
        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        /* Signal solicited completion event. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
        return 0;
}
/**
 * rvt_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return: -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
{
        unsigned long flags;
        struct rvt_rq *rq;
        struct rvt_rwq *wq;
        struct rvt_srq *srq;
        struct rvt_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = ibsrq_to_rvtsrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        if (unlikely(tail == wq->head)) {
                ret = 0;
                goto unlock;
        }
        /* Make sure entry is read after head index is read. */
        smp_rmb();
        wqe = rvt_get_rwqe_ptr(rq, tail);
        /*
         * Even though we update the tail index in memory, the verbs
         * consumer is not supposed to post more entries until a
         * completion is generated.
         */
        if (++tail >= rq->size)
                tail = 0;
        wq->tail = tail;
        if (!wr_id_only && !init_sge(qp, wqe)) {
                ret = -1;
                goto unlock;
        }
        qp->r_wr_id = wqe->wr_id;

        ret = 1;
        set_bit(RVT_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}
EXPORT_SYMBOL(rvt_get_rwqe);
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
        qp->r_flags |= RVT_R_COMM_EST;
        if (qp->ibqp.event_handler) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_COMM_EST;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}
EXPORT_SYMBOL(rvt_comm_est);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
        unsigned long flags;
        int lastwqe;

        spin_lock_irqsave(&qp->s_lock, flags);
        lastwqe = rvt_error_qp(qp, err);
        spin_unlock_irqrestore(&qp->s_lock, flags);

        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
}
EXPORT_SYMBOL(rvt_rc_error);
/**
 * rvt_rnr_tbl_to_usec - return usec from an index into ib_rvt_rnr_table
 * @index: the index
 *
 * Return: the timeout in microseconds
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
        return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);

static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
        return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
                                IB_AETH_CREDIT_MASK];
}
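/*
 * Illustrative decode (IB_AETH_CREDIT_SHIFT and IB_AETH_CREDIT_MASK come
 * from rdma/ib_hdrs.h): an RNR NAK AETH whose shifted timer field holds
 * code 0x1f yields table index 0x1f and therefore 491520 usec
 * (491.52 msec) of requester backoff.
 */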
/**
 * rvt_add_retry_timer - add/start a retry timer
 * @qp: the QP
 *
 * add a retry timer on the QP
 */
void rvt_add_retry_timer(struct rvt_qp *qp)
{
        struct ib_qp *ibqp = &qp->ibqp;
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

        lockdep_assert_held(&qp->s_lock);
        qp->s_flags |= RVT_S_TIMER;
        /* 4.096 usec. * (1 << qp->timeout) */
        qp->s_timer.expires = jiffies + qp->timeout_jiffies +
                              rdi->busy_jiffies;
        add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer);
/**
 * rvt_add_rnr_timer - add/start an rnr timer on the QP
 * @qp: the QP
 * @aeth: aeth of RNR timeout, simulated aeth for loopback
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
        u32 to;

        lockdep_assert_held(&qp->s_lock);
        qp->s_flags |= RVT_S_WAIT_RNR;
        to = rvt_aeth_to_usec(aeth);
        trace_rvt_rnrnak_add(qp, to);
        hrtimer_start(&qp->s_rnr_timer,
                      ns_to_ktime(1000 * to), HRTIMER_MODE_REL_PINNED);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
/**
 * rvt_stop_rc_timers - stop all timers
 * @qp: the QP
 *
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);
        /* Remove QP from all timers */
        if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
                qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
                del_timer(&qp->s_timer);
                hrtimer_try_to_cancel(&qp->s_rnr_timer);
        }
}
EXPORT_SYMBOL(rvt_stop_rc_timers);
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp: the QP
 *
 * stop an rnr timer, if one is pending
 */
static void rvt_stop_rnr_timer(struct rvt_qp *qp)
{
        lockdep_assert_held(&qp->s_lock);
        /* Remove QP from rnr timer */
        if (qp->s_flags & RVT_S_WAIT_RNR) {
                qp->s_flags &= ~RVT_S_WAIT_RNR;
                trace_rvt_rnrnak_stop(qp, 0);
        }
}
/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp: the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
        del_timer_sync(&qp->s_timer);
        hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);
/*
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(struct timer_list *t)
{
        struct rvt_qp *qp = from_timer(qp, t, s_timer);
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
        unsigned long flags;

        spin_lock_irqsave(&qp->r_lock, flags);
        spin_lock(&qp->s_lock);
        if (qp->s_flags & RVT_S_TIMER) {
                struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

                qp->s_flags &= ~RVT_S_TIMER;
                rvp->n_rc_timeouts++;
                del_timer(&qp->s_timer);
                trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
                if (rdi->driver_f.notify_restart_rc)
                        rdi->driver_f.notify_restart_rc(qp,
                                                        qp->s_last_psn + 1,
                                                        1);
                rdi->driver_f.schedule_send(qp);
        }
        spin_unlock(&qp->s_lock);
        spin_unlock_irqrestore(&qp->r_lock, flags);
}
/*
 * This is called from s_rnr_timer to handle RNR timeouts.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
        struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
        struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        rvt_stop_rnr_timer(qp);
        trace_rvt_rnrnak_timeout(qp, 0);
        rdi->driver_f.schedule_send(qp);
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
/**
 * rvt_qp_iter_init - initialize an iterator for QP iteration
 * @rdi: rvt devinfo
 * @v: a 64 bit value
 * @cb: a callback
 *
 * This returns an iterator suitable for iterating QPs
 * in a debugfs seq_file.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
                                     u64 v,
                                     void (*cb)(struct rvt_qp *qp, u64 v))
{
        struct rvt_qp_iter *i;

        i = kzalloc(sizeof(*i), GFP_KERNEL);
        if (!i)
                return NULL;

        i->rdi = rdi;
        /* number of special QPs (SMI/GSI) for device */
        i->specials = rdi->ibdev.phys_port_cnt * 2;
        i->v = v;
        i->cb = cb;

        return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);
/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter: the iterator
 *
 * Fine grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
        __must_hold(RCU)
{
        int n = iter->n;
        int ret = 1;
        struct rvt_qp *pqp = iter->qp;
        struct rvt_qp *qp;
        struct rvt_dev_info *rdi = iter->rdi;

        /*
         * The approach is to consider the special qps
         * as additional table entries before the
         * real hash table. Since the qp code sets
         * the qp->next hash link to NULL, this works just fine.
         *
         * iter->specials is 2 * # ports
         *
         * n = 0..iter->specials is the special qp indices
         *
         * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
         * the potential hash bucket entries
         */
        for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
                if (pqp) {
                        qp = rcu_dereference(pqp->next);
                } else {
                        if (n < iter->specials) {
                                struct rvt_ibport *rvp;
                                int pidx;

                                pidx = n % rdi->ibdev.phys_port_cnt;
                                rvp = rdi->ports[pidx];
                                qp = rcu_dereference(rvp->qp[n & 1]);
                        } else {
                                qp = rcu_dereference(
                                        rdi->qp_dev->qp_table[
                                                (n - iter->specials)]);
                        }
                }
                pqp = qp;
                if (qp) {
                        iter->qp = qp;
                        iter->n = n;
                        return 0;
                }
        }
        return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);
/**
 * rvt_qp_iter - iterate all QPs
 * @rdi: rvt devinfo
 * @v: a 64 bit value
 * @cb: a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
                 u64 v,
                 void (*cb)(struct rvt_qp *qp, u64 v))
{
        int ret;
        struct rvt_qp_iter i = {
                .rdi = rdi,
                .specials = rdi->ibdev.phys_port_cnt * 2,
                .v = v,
                .cb = cb
        };

        rcu_read_lock();
        do {
                ret = rvt_qp_iter_next(&i);
                if (!ret) {
                        rvt_get_qp(i.qp);
                        rcu_read_unlock();
                        i.cb(i.qp, i.v);
                        rcu_read_lock();
                        rvt_put_qp(i.qp);
                }
        } while (!ret);
        rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
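/*
 * Usage sketch, mirroring rvt_free_qp_cb()/rvt_free_all_qps() above
 * (count_cb is a hypothetical callback, not part of this file): pass a
 * callback and a 64-bit cookie, e.g. a pointer to a counter:
 *
 *      static void count_cb(struct rvt_qp *qp, u64 v)
 *      {
 *              (*(unsigned int *)v)++;
 *      }
 *
 *      unsigned int n = 0;
 *
 *      rvt_qp_iter(rdi, (u64)&n, count_cb);
 */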