/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include "i40iw.h"
/**
 * i40iw_initialize_hw_resources - initialize hw resources during open
 * @iwdev: iwarp device
 */
u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
{
	unsigned long num_pds;
	u32 resources_size;
	u32 max_mr;
	u32 arp_table_size;
	u32 max_qp;
	u32 max_cq;
	u32 mrdrvbits;
	void *resource_ptr;

	max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
	max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
	arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
	iwdev->max_cqe = 0xFFFFF;
	num_pds = I40IW_MAX_PDS;
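	/*
	 * All software-tracked resources live in one allocation: the ARP
	 * table, one allocation bitmap per resource type (QPs, MRs, CQs,
	 * PDs and ARP entries) and the QP pointer table.
	 */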
	resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
	resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
	resources_size += sizeof(struct i40iw_qp **) * max_qp;
	iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);

	if (!iwdev->mem_resources)
		return -ENOMEM;

	iwdev->max_qp = max_qp;
	iwdev->max_mr = max_mr;
	iwdev->max_cq = max_cq;
	iwdev->max_pd = num_pds;
	iwdev->arp_table_size = arp_table_size;
	iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
	resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);

	iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
	    IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;

	iwdev->allocated_qps = resource_ptr;
	iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
	iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
	iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
	iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
	iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
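	/* reserve entry 0 of each bitmap so resource number 0 is never handed out */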
	set_bit(0, iwdev->allocated_mrs);
	set_bit(0, iwdev->allocated_qps);
	set_bit(0, iwdev->allocated_cqs);
	set_bit(0, iwdev->allocated_pds);
	set_bit(0, iwdev->allocated_arps);

	/* Following for ILQ/IEQ */
	set_bit(1, iwdev->allocated_qps);
	set_bit(1, iwdev->allocated_cqs);
	set_bit(1, iwdev->allocated_pds);
	set_bit(2, iwdev->allocated_cqs);
	set_bit(2, iwdev->allocated_pds);

	spin_lock_init(&iwdev->resource_lock);
	spin_lock_init(&iwdev->qptable_lock);
	/* stag index mask has a minimum of 14 bits */
	mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14);
	iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
	return 0;
}
/**
 * i40iw_cqp_ce_handler - handle cqp completions
 * @iwdev: iwarp device
 * @cq: cq for cqp completions
 * @arm: flag to arm after completions
 */
static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
{
	struct i40iw_cqp_request *cqp_request;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u32 cqe_count = 0;
	struct i40iw_ccq_cqe_info info;
	int ret;

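	/* drain the control QP's completion queue until it is empty */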
	do {
		memset(&info, 0, sizeof(info));
		ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
		if (ret)
			break;
		cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
		if (info.error)
			i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
				     info.op_code, info.maj_err_code, info.min_err_code);
		if (cqp_request) {
			cqp_request->compl_info.maj_err_code = info.maj_err_code;
			cqp_request->compl_info.min_err_code = info.min_err_code;
			cqp_request->compl_info.op_ret_val = info.op_ret_val;
			cqp_request->compl_info.error = info.error;

			if (cqp_request->waiting) {
				cqp_request->request_done = true;
				wake_up(&cqp_request->waitq);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			} else {
				if (cqp_request->callback_fcn)
					cqp_request->callback_fcn(cqp_request, 1);
				i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
			}
		}

		cqe_count++;
	} while (1);

	if (arm && cqe_count) {
		i40iw_process_bh(dev);
		dev->ccq_ops->ccq_arm(cq);
	}
}
/**
 * i40iw_iwarp_ce_handler - handle iwarp completions
 * @iwdev: iwarp device
 * @iwcq: iwarp cq receiving event
 */
static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
				   struct i40iw_sc_cq *iwcq)
{
	struct i40iw_cq *i40iwcq = iwcq->back_cq;

	if (i40iwcq->ibcq.comp_handler)
		i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
					   i40iwcq->ibcq.cq_context);
}
/**
 * i40iw_puda_ce_handler - handle puda completion events
 * @iwdev: iwarp device
 * @cq: puda completion q for event
 */
static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
				  struct i40iw_sc_cq *cq)
{
	struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
	enum i40iw_status_code status;
	u32 compl_error;

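	/* poll ILQ/IEQ completions until the queue is empty or an error is reported */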
	do {
		status = i40iw_puda_poll_completion(dev, cq, &compl_error);
		if (status == I40IW_ERR_QUEUE_EMPTY)
			break;
		if (status) {
			i40iw_pr_err("puda status = %d\n", status);
			break;
		}
		if (compl_error) {
			i40iw_pr_err("puda compl_err = 0x%x\n", compl_error);
			break;
		}
	} while (1);

	dev->ccq_ops->ccq_arm(cq);
}
/**
 * i40iw_process_ceq - handle ceq for completions
 * @iwdev: iwarp device
 * @ceq: ceq having cq for completion
 */
void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_ceq *sc_ceq;
	struct i40iw_sc_cq *cq;
	bool arm = true;

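	/* each CEQ element points at a CQ; dispatch on the CQ type below */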
	sc_ceq = &ceq->sc_ceq;
	do {
		cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
		if (!cq)
			break;

		if (cq->cq_type == I40IW_CQ_TYPE_CQP)
			i40iw_cqp_ce_handler(iwdev, cq, arm);
		else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
			i40iw_iwarp_ce_handler(iwdev, cq);
		else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
			 (cq->cq_type == I40IW_CQ_TYPE_IEQ))
			i40iw_puda_ce_handler(iwdev, cq);
	} while (1);
}
/**
 * i40iw_next_iw_state - modify qp state
 * @iwqp: iwarp qp to modify
 * @state: next state for qp
 * @del_hash: del hash
 * @term: term message
 * @termlen: length of term message
 */
void i40iw_next_iw_state(struct i40iw_qp *iwqp,
			 u8 state,
			 u8 del_hash,
			 u8 term,
			 u8 termlen)
{
	struct i40iw_modify_qp_info info;

	memset(&info, 0, sizeof(info));
	info.next_iwarp_state = state;
	info.remove_hash_idx = del_hash;
	info.cq_num_valid = true;
	info.arp_cache_idx_valid = true;
	info.dont_send_term = true;
	info.dont_send_fin = true;
	info.termlen = termlen;

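	/* TERM and FIN are suppressed by default; the term flags may re-enable them */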
	if (term & I40IWQP_TERM_SEND_TERM_ONLY)
		info.dont_send_term = false;
	if (term & I40IWQP_TERM_SEND_FIN_ONLY)
		info.dont_send_fin = false;
	if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
		info.reset_tcp_conn = true;
	iwqp->hw_iwarp_state = state;
	i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
}
/**
 * i40iw_process_aeq - handle aeq events
 * @iwdev: iwarp device
 */
void i40iw_process_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
	struct i40iw_aeqe_info aeinfo;
	struct i40iw_aeqe_info *info = &aeinfo;
	int ret;
	struct i40iw_qp *iwqp = NULL;
	struct i40iw_sc_cq *cq = NULL;
	struct i40iw_cq *iwcq = NULL;
	struct i40iw_sc_qp *qp = NULL;
	struct i40iw_qp_host_ctx_info *ctx_info = NULL;
	unsigned long flags;

	u32 aeqcnt = 0;

	if (!sc_aeq->size)
		return;

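	/*
	 * Drain the AEQ.  Each asynchronous event entry normally references
	 * a QP; only I40IW_AE_CQ_OPERATION_ERROR is processed without one.
	 */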
	do {
		memset(info, 0, sizeof(*info));
		ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
		if (ret)
			break;

		aeqcnt++;
		i40iw_debug(dev, I40IW_DEBUG_AEQ,
			    "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
			    __func__, info->ae_id, info->qp, info->qp_cq_id);
		if (info->qp) {
			spin_lock_irqsave(&iwdev->qptable_lock, flags);
			iwqp = iwdev->qp_table[info->qp_cq_id];
			if (!iwqp) {
				spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
				i40iw_debug(dev, I40IW_DEBUG_AEQ,
					    "%s qp_id %d is already freed\n",
					    __func__, info->qp_cq_id);
				continue;
			}
			i40iw_add_ref(&iwqp->ibqp);
			spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
			qp = &iwqp->sc_qp;
			spin_lock_irqsave(&iwqp->lock, flags);
			iwqp->hw_tcp_state = info->tcp_state;
			iwqp->hw_iwarp_state = info->iwarp_state;
			iwqp->last_aeq = info->ae_id;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			ctx_info = &iwqp->ctx_info;
			ctx_info->err_rq_idx_valid = true;
		} else {
			if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
				continue;
		}

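		/*
		 * Handle the event.  Error AEs not handled explicitly below
		 * fall through to the default case, which terminates the
		 * connection.
		 */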
		switch (info->ae_id) {
		case I40IW_AE_LLP_FIN_RECEIVED:
			if (qp->term_flags)
				break;
			if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
				if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
				    (iwqp->ibqp_state == IB_QPS_RTS)) {
					i40iw_next_iw_state(iwqp,
							    I40IW_QP_STATE_CLOSING, 0, 0, 0);
					i40iw_cm_disconn(iwqp);
				}
				iwqp->cm_id->add_ref(iwqp->cm_id);
				i40iw_schedule_cm_timer(iwqp->cm_node,
							(struct i40iw_puda_buf *)iwqp,
							I40IW_TIMER_TYPE_CLOSE, 1, 0);
			}
			break;
		case I40IW_AE_LLP_CLOSE_COMPLETE:
			if (qp->term_flags)
				i40iw_terminate_done(qp, 0);
			else
				i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_RESET_SENT:
			i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_LLP_CONNECTION_RESET:
			if (atomic_read(&iwqp->close_timer_started))
				break;
			i40iw_cm_disconn(iwqp);
			break;
		case I40IW_AE_QP_SUSPEND_COMPLETE:
			i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false);
			break;
		case I40IW_AE_TERMINATE_SENT:
			i40iw_terminate_send_fin(qp);
			break;
		case I40IW_AE_LLP_TERMINATE_RECEIVED:
			i40iw_terminate_received(qp, info);
			break;
		case I40IW_AE_CQ_OPERATION_ERROR:
			i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
				     info->ae_id);
			cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
			iwcq = (struct i40iw_cq *)cq->back_cq;

			if (iwcq->ibcq.event_handler) {
				struct ib_event ibevent;

				ibevent.device = iwcq->ibcq.device;
				ibevent.event = IB_EVENT_CQ_ERR;
				ibevent.element.cq = &iwcq->ibcq;
				iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
			}
			break;
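		/*
		 * For the AEs below, err_rq_idx_valid is cleared so the
		 * default case does not program a receive-queue error index
		 * into the QP context.
		 */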
		case I40IW_AE_PRIV_OPERATION_DENIED:
		case I40IW_AE_STAG_ZERO_INVALID:
		case I40IW_AE_IB_RREQ_AND_Q1_FULL:
		case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		case I40IW_AE_DDP_UBE_INVALID_MO:
		case I40IW_AE_DDP_UBE_INVALID_QN:
		case I40IW_AE_DDP_NO_L_BIT:
		case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
		case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		case I40IW_AE_INVALID_ARP_ENTRY:
		case I40IW_AE_INVALID_TCP_OPTION_RCVD:
		case I40IW_AE_STALE_ARP_ENTRY:
		case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		case I40IW_AE_LLP_SYN_RECEIVED:
		case I40IW_AE_LLP_TOO_MANY_RETRIES:
		case I40IW_AE_LLP_DOUBT_REACHABILITY:
		case I40IW_AE_LCE_QP_CATASTROPHIC:
		case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
		case I40IW_AE_LCE_CQ_CATASTROPHIC:
		case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
		case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
			ctx_info->err_rq_idx_valid = false;
			/* fall through */
		default:
			if (!info->sq && ctx_info->err_rq_idx_valid) {
				ctx_info->err_rq_idx = info->wqe_idx;
				ctx_info->tcp_info_valid = false;
				ctx_info->iwarp_info_valid = false;
				ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
								     iwqp->host_ctx.va,
								     ctx_info);
			}
			i40iw_terminate_connection(qp, info);
			break;
		}
		if (info->qp)
			i40iw_rem_ref(&iwqp->ibqp);
	} while (1);

	if (aeqcnt)
		dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
}
/**
 * i40iw_manage_apbvt - add or delete tcp port
 * @iwdev: iwarp device
 * @accel_local_port: port for apbvt
 * @add_port: add or delete port
 */
int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
{
	struct i40iw_apbvt_info *info;
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_apbvt_entry.info;

	memset(info, 0, sizeof(*info));
	info->add = add_port;
	info->port = cpu_to_le16(accel_local_port);

	cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage APBVT entry fail");
	return status;
}
/**
 * i40iw_manage_arp_cache - manage hw arp cache
 * @iwdev: iwarp device
 * @mac_addr: mac address ptr
 * @ip_addr: ip addr for arp cache
 * @ipv4: flag indicating ipv4 or ipv6 address
 * @action: add, delete or modify
 */
void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
			    unsigned char *mac_addr,
			    u32 *ip_addr,
			    bool ipv4,
			    u32 action)
{
	struct i40iw_add_arp_cache_entry_info *info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	int arp_index;

	arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
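	/* a negative index means the local ARP table has no entry to sync to hardware */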
	if (arp_index == -1)
		return;
	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	if (action == I40IW_ARP_ADD) {
		cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
		info = &cqp_info->in.u.add_arp_cache_entry.info;
		memset(info, 0, sizeof(*info));
		info->arp_index = cpu_to_le16((u16)arp_index);
		info->permanent = true;
		ether_addr_copy(info->mac_addr, mac_addr);
		cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	} else {
		cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
		cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
		cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
		cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
	}

	cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->post_sq = 1;
	if (i40iw_handle_cqp_op(iwdev, cqp_request))
		i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
}
/**
 * i40iw_send_syn_cqp_callback - do syn/ack after qhash
 * @cqp_request: qhash cqp completion
 * @send_ack: flag send ack
 */
static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
{
	i40iw_send_syn(cqp_request->param, send_ack);
}
/**
 * i40iw_manage_qhash - add or modify qhash
 * @iwdev: iwarp device
 * @cminfo: cm info for qhash
 * @etype: type (syn or quad)
 * @mtype: type of qhash
 * @cmnode: cmnode associated with connection
 * @wait: wait for completion
 * @user_pri: user pri of the connection
 */
enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
					  struct i40iw_cm_info *cminfo,
					  enum i40iw_quad_entry_type etype,
					  enum i40iw_quad_hash_manage_type mtype,
					  void *cmnode,
					  bool wait)
{
	struct i40iw_qhash_table_info *info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_sc_vsi *vsi = &iwdev->vsi;
	enum i40iw_status_code status;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(iwcqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;
	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.manage_qhash_table_entry.info;
	memset(info, 0, sizeof(*info));

	info->vsi = &iwdev->vsi;
	info->manage = mtype;
	info->entry_type = etype;
	if (cminfo->vlan_id != 0xFFFF) {
		info->vlan_valid = true;
		info->vlan_id = cpu_to_le16(cminfo->vlan_id);
	} else {
		info->vlan_valid = false;
	}

	info->ipv4_valid = cminfo->ipv4;
	info->user_pri = cminfo->user_pri;
	ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
	info->qp_num = cpu_to_le32(vsi->ilq->qp_id);
	info->dest_port = cpu_to_le16(cminfo->loc_port);
	info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
	info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
	info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
	info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
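	/*
	 * An "established" (quad hash) entry also carries the remote address
	 * and port; a SYN entry matches on the local tuple only.
	 */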
	if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
		info->src_port = cpu_to_le16(cminfo->rem_port);
		info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
		info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
		info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
		info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
	}
	if (cmnode) {
		cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
		cqp_request->param = (void *)cmnode;
	}

	if (info->ipv4_valid)
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	else
		i40iw_debug(dev, I40IW_DEBUG_CM,
			    "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
			    __func__, (!mtype) ? "DELETE" : "ADD",
			    info->dest_ip,
			    info->dest_port, info->mac_addr, cminfo->vlan_id);
	cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
	cqp_info->post_sq = 1;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
	return status;
}
/**
 * i40iw_hw_flush_wqes - flush qp's wqe
 * @iwdev: iwarp device
 * @qp: hardware control qp
 * @info: info for flush
 * @wait: flag wait for completion
 */
enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
					   struct i40iw_sc_qp *qp,
					   struct i40iw_qp_flush_info *info,
					   bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_qp_flush_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
	memcpy(hw_info, info, sizeof(*hw_info));

	cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_flush_wqes.qp = qp;
	cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Flush WQE's fail");
		complete(&iwqp->sq_drained);
		complete(&iwqp->rq_drained);
		return status;
	}
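	/*
	 * The completion's minor code reports which work queues actually had
	 * WQEs flushed; complete the drain waiters that will not see a
	 * flushed CQE of their own.
	 */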
	if (!cqp_request->compl_info.maj_err_code) {
		switch (cqp_request->compl_info.min_err_code) {
		case I40IW_CQP_COMPL_RQ_WQE_FLUSHED:
			complete(&iwqp->sq_drained);
			break;
		case I40IW_CQP_COMPL_SQ_WQE_FLUSHED:
			complete(&iwqp->rq_drained);
			break;
		case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED:
			break;
		default:
			complete(&iwqp->sq_drained);
			complete(&iwqp->rq_drained);
			break;
		}
	}

	return 0;
}
/**
 * i40iw_hw_manage_vf_pble_bp - manage vf pbles
 * @iwdev: iwarp device
 * @info: info for managing pble
 * @wait: flag wait for completion
 */
enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
						  struct i40iw_manage_vf_pble_info *info,
						  bool wait)
{
	enum i40iw_status_code status;
	struct i40iw_manage_vf_pble_info *hw_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

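	/*
	 * Completion reporting is not available until the CCQ exists, so
	 * requests issued earlier in initialization cannot wait.
	 */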
	if ((iwdev->init_state < CCQ_CREATED) && wait)
		wait = false;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return I40IW_ERR_NO_MEMORY;

	cqp_info = &cqp_request->info;
	hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
	memcpy(hw_info, info, sizeof(*hw_info));

	cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
	cqp_info->post_sq = 1;
	cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
	return status;
}
/**
 * i40iw_get_ib_wc - map an iwarp flush code to an IB work completion status
 * @opcode: iwarp flush code
 */
static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
{
	switch (opcode) {
	case FLUSH_PROT_ERR:
		return IB_WC_LOC_PROT_ERR;
	case FLUSH_REM_ACCESS_ERR:
		return IB_WC_REM_ACCESS_ERR;
	case FLUSH_LOC_QP_OP_ERR:
		return IB_WC_LOC_QP_OP_ERR;
	case FLUSH_REM_OP_ERR:
		return IB_WC_REM_OP_ERR;
	case FLUSH_LOC_LEN_ERR:
		return IB_WC_LOC_LEN_ERR;
	case FLUSH_GENERAL_ERR:
		return IB_WC_GENERAL_ERR;
	case FLUSH_FATAL_ERR:
	default:
		return IB_WC_FATAL_ERR;
	}
}
/**
 * i40iw_set_flush_info - set flush info
 * @pinfo: flush info to fill in
 * @min: minor err
 * @maj: major err
 * @opcode: flush error code
 */
static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
				 u16 *min,
				 u16 *maj,
				 enum i40iw_flush_opcode opcode)
{
	*min = (u16)i40iw_get_ib_wc(opcode);
	*maj = CQE_MAJOR_DRV;
	pinfo->userflushcode = true;
}
/**
 * i40iw_flush_wqes - flush wqe for qp
 * @iwdev: iwarp device
 * @iwqp: qp to flush wqes
 */
void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
{
	struct i40iw_qp_flush_info info;
	struct i40iw_qp_flush_info *pinfo = &info;
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	memset(pinfo, 0, sizeof(*pinfo));
	info.sq = true;
	info.rq = true;
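	/*
	 * If the QP was terminated, translate the terminate flush code into
	 * IB work-completion codes for both the SQ and the RQ.
	 */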
	if (qp->term_flags) {
		i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
				     &pinfo->sq_major_code, qp->flush_code);
		i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
				     &pinfo->rq_major_code, qp->flush_code);
	}

	(void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
}