// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"

static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 *
 * Return: true on success, else false if the cq is full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct ib_uverbs_wc *uqueue = NULL;
	struct ib_wc *kqueue = NULL;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	unsigned long flags;
	u32 head;
	u32 next;
	u32 tail;

	spin_lock_irqsave(&cq->lock, flags);

	if (cq->ip) {
		u_wc = cq->queue;
		uqueue = &u_wc->uqueue[0];
		head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
	} else {
		k_wc = cq->kqueue;
		kqueue = &k_wc->kqueue[0];
		head = k_wc->head;
		tail = k_wc->tail;
	}

	/*
	 * Note that the head pointer might be writable by
	 * user processes.  Take care to verify it is a sane value.
	 */
	if (head >= (unsigned int)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}

	if (unlikely(next == tail || cq->cq_full)) {
		struct rvt_dev_info *rdi = cq->rdi;

		if (!cq->cq_full)
			rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
		cq->cq_full = true;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return false;
	}
	trace_rvt_cq_enter(cq, entry, head);
	if (uqueue) {
		uqueue[head].wr_id = entry->wr_id;
		uqueue[head].status = entry->status;
		uqueue[head].opcode = entry->opcode;
		uqueue[head].vendor_err = entry->vendor_err;
		uqueue[head].byte_len = entry->byte_len;
		uqueue[head].ex.imm_data = entry->ex.imm_data;
		uqueue[head].qp_num = entry->qp->qp_num;
		uqueue[head].src_qp = entry->src_qp;
		uqueue[head].wc_flags = entry->wc_flags;
		uqueue[head].pkey_index = entry->pkey_index;
		uqueue[head].slid = ib_lid_cpu16(entry->slid);
		uqueue[head].sl = entry->sl;
		uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
	} else {
		kqueue[head] = *entry;
		k_wc->head = next;
	}
	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		cq->notify = RVT_CQ_NONE;
		cq->triggered++;
		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
			      &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
	return true;
}
EXPORT_SYMBOL(rvt_cq_enter);
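
/*
 * Example (illustrative sketch, not part of the original file): a driver's
 * completion path fills in a struct ib_wc and hands it to rvt_cq_enter().
 * The helper name and error handling below are hypothetical.
 *
 *	static void example_send_complete(struct rvt_qp *qp, u64 wr_id)
 *	{
 *		struct ib_wc wc = { };
 *
 *		wc.wr_id = wr_id;
 *		wc.status = IB_WC_SUCCESS;
 *		wc.opcode = IB_WC_SEND;
 *		wc.qp = &qp->ibqp;
 *
 *		if (!rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, false))
 *			pr_warn("completion dropped: CQ overflow\n");
 *	}
 */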
static void send_complete(struct work_struct *work)
{
	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}
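
/*
 * Worked example of the race the 'triggered' check above closes
 * (illustrative, not from the original source): while CPU0 is inside
 * comp_handler(), CPU1 calls rvt_cq_enter(), increments cq->triggered and
 * calls queue_work_on(). Because the work item is still running, the
 * workqueue will not re-invoke send_complete(); instead the stale snapshot
 * in 'triggered' no longer matches, so the for (;;) loop calls the handler
 * once more and no completion event is lost.
 */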
/**
 * rvt_create_cq - create a completion queue
 * @ibcq: Allocated CQ
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: 0 on success, otherwise a negative errno.
 */
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	u32 sz;
	unsigned int entries = attr->cqe;
	int comp_vector = attr->comp_vector;
	int err;

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return -EINVAL;

	if (comp_vector < 0)
		comp_vector = 0;

	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (entries + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
		if (IS_ERR(cq->ip)) {
			err = PTR_ERR(cq->ip);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err)
			goto bail_ip;
	}
	spin_lock_irq(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		err = -ENOMEM;
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}
	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
	if (rdi->driver_f.comp_vect_cpu_lookup)
		cq->comp_vector_cpu =
			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
	else
		cq->comp_vector_cpu =
			cpumask_first(cpumask_of_node(rdi->dparms.node));

	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	if (u_wc)
		cq->queue = u_wc;
	else
		cq->kqueue = k_wc;

	trace_rvt_create_cq(cq, attr);
	return 0;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(u_wc);
	vfree(k_wc);
	return err;
}
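
/*
 * Example (sketch, hypothetical consumer code): a kernel ULP reaches
 * rvt_create_cq() through ib_create_cq(); the init attributes below become
 * attr->cqe and attr->comp_vector above.
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 128, .comp_vector = 0 };
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(ibdev, my_comp_handler, NULL, my_context, &cq_attr);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 */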
/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;

	flush_work(&cq->comptask);
	spin_lock_irq(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip)
		kref_put(&cq->ip->ref, rvt_release_mmap_info);
	else
		vfree(cq->kqueue);

	return 0;
}
/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		if (cq->queue) {
			if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
			    RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
				ret = 1;
		} else {
			if (cq->kqueue->head != cq->kqueue->tail)
				ret = 1;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
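
/*
 * Example (sketch, hypothetical consumer code): the usual re-arm pattern
 * pairs IB_CQ_REPORT_MISSED_EVENTS with another poll pass, since a positive
 * return value here means entries were queued between the last poll and
 * the re-arm.
 *
 *	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *			     IB_CQ_REPORT_MISSED_EVENTS) > 0)
 *		goto repoll;
 */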
/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new size of the CQ
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	u32 head, tail, n;
	int ret;
	u32 sz;
	struct rvt_dev_info *rdi = cq->rdi;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_cq_wc *old_u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	struct rvt_k_cq_wc *old_k_wc = NULL;

	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (cqe + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}
	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	if (u_wc) {
		old_u_wc = cq->queue;
		head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
	} else {
		old_k_wc = cq->kqueue;
		head = old_k_wc->head;
		tail = old_k_wc->tail;
	}

	if (head > (u32)cq->ibcq.cqe)
		head = (u32)cq->ibcq.cqe;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	for (n = 0; tail != head; n++) {
		if (u_wc)
			u_wc->uqueue[n] = old_u_wc->uqueue[tail];
		else
			k_wc->kqueue[n] = old_k_wc->kqueue[tail];
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	if (u_wc) {
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
		RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
		cq->queue = u_wc;
	} else {
		k_wc->head = n;
		k_wc->tail = 0;
		cq->kqueue = k_wc;
	}
	spin_unlock_irq(&cq->lock);

	if (u_wc)
		vfree(old_u_wc);
	else
		vfree(old_k_wc);
	if (cq->ip) {
		struct rvt_mmap_info *ip = cq->ip;

		rvt_update_mmap_info(rdi, ip, sz, u_wc);

		/*
		 * Return the offset to mmap.
		 * See rvt_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				return ret;
		}

		spin_lock_irq(&rdi->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return 0;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(u_wc);
	vfree(k_wc);

	return ret;
}
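
/*
 * Example (sketch, hypothetical consumer code): consumers reach this
 * function through ib_resize_cq(); completions still queued in the old
 * buffer are copied into the new one above, so none are lost.
 *
 *	ret = ib_resize_cq(ibcq, new_cqe);
 *	if (ret)
 *		pr_warn("CQ resize failed: %d\n", ret);
 */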
/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_k_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip)
		return -EINVAL;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->kqueue;
	tail = wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}
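
/*
 * Example (sketch, hypothetical consumer code): the canonical kernel poll
 * loop drains the CQ in batches until ib_poll_cq(), which lands here,
 * returns 0.
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_one_completion(&wc[i]);
 */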
/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					 0, "rdmavt_cq");
	if (!comp_vector_wq)
		return -ENOMEM;

	return 0;
}
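
/*
 * Note (illustrative, assumed pairing): a driver stack would call
 * rvt_driver_cq_init() once at module load and rvt_cq_exit() below at
 * unload, e.g.:
 *
 *	static int __init my_init(void)
 *	{
 *		return rvt_driver_cq_init();
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		rvt_cq_exit();
 *	}
 */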
/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
	destroy_workqueue(comp_vector_wq);
	comp_vector_wq = NULL;
}