/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
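
/* State kept for the default (cdev-level) LL2 client: the pool of
 * preallocated Rx buffers, their size, and the registered callbacks.
 */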
struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}

static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(cdev,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		DP_INFO(cdev, "Failed to build SKB\n");
		kfree(buffer->data);
		goto out_post1;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial Ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	} else {
		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
				    QED_MSG_LL2 | QED_MSG_STORAGE),
			   "Dropping the packet\n");
		kfree(buffer->data);
	}

out_post1:
	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);
	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}
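
/* Release every Tx descriptor still pending on a stopped connection; OOO
 * buffers return to the free pool, others go to the owner's tx_release_cb.
 */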
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
	}
}
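
/* Tx completion handler: consume BDs up to the firmware consumer index and
 * invoke tx_comp_cb for every fully completed packet.
 */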
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
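
/* Translate the fast-path fields of a GSI or regular Rx CQE into the
 * generic qed_ll2_comp_rx_data handed to the owner's Rx callback.
 */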
static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}
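
/* Rx completion handler: drain the Rx CQ chain and dispatch each CQE to
 * qed_ll2_rxq_handle_completion().
 */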
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
	}
}

static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
{
	u8 bd_flags = 0;

	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);

	return bd_flags;
}
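
/* Loopback (OOO) Rx handler: interpret the TCP out-of-order event carried
 * in each CQE's opaque data and update the isles accordingly.
 */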
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}
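
/* Push buffers that became in-order back out on the Tx queue, and re-post
 * freed buffers to the Rx ring.
 */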
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u16 parse_flags;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		l4_hdr_offset_w = 0;
		bd_flags = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		parse_flags = p_buffer->parse_flags;
		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		tx_pkt.tx_dest = p_ll2_conn->tx_dest;
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}
static void qed_ll2_stop_ooo(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(cdev, QED_MSG_STORAGE, "Stopping LL2 OOO queue [%02x]\n",
		   *handle);

	qed_ll2_terminate_connection(hwfn, *handle);
	qed_ll2_release_connection(hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}
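
/* Send the CORE_RAMROD_RX_QUEUE_START ramrod, describing the Rx BD chain
 * and the CQE PBL to firmware.
 */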
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
	    (conn_type != QED_LL2_TYPE_IWARP)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case PURE_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case PKT_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	case QED_LL2_TYPE_IWARP:
		p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	case QED_LL2_TYPE_OOO:
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			p_ramrod->conn_type = PROTOCOLID_ISCSI;
		else
			p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain, NULL);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}
static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}
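
/* Reserve a free connection slot and allocate its Rx/Tx descriptor chains;
 * on success the chosen handle is returned via data->p_connection_handle.
 */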
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
			      CORE_TX_DEST_NW : CORE_TX_DEST_LB;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (*p_tx_max == 0)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*data->p_connection_handle = i;
	return rc;
q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}
static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8906, 0,
					    QED_LLH_FILTER_ETHERTYPE);
		qed_llh_add_protocol_filter(p_hwfn, p_ptt,
					    0x8914, 0,
					    QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
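
/* Move buffers queued on posting_descq (and the supplied packet, if any)
 * onto the active list, then ring the Rx producer doorbell once.
 */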
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);

	/* Make sure chain element is updated before ringing the doorbell */
	dma_wmb();

	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}
static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
						       : CORE_TX_DEST_LB;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}
/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	dma_wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, db_msg.spq_prod);
}
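
/* Queue a Tx packet on a connection. With notify_fw set, the doorbell is
 * rung as soon as all of the packet's BDs have been provided.
 */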
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8906, 0,
					       QED_LLH_FILTER_ETHERTYPE);
		qed_llh_remove_protocol_filter(p_hwfn, p_ptt,
					       0x8914, 0,
					       QED_LLH_FILTER_ETHERTYPE);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}
int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;

	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr = HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length =
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ =
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error =
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}
int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}

static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};
static void qed_ll2_set_conn_data(struct qed_dev *cdev,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = QED_LEADING_HWFN(cdev);

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

static int qed_ll2_start_ooo(struct qed_dev *cdev,
			     struct qed_ll2_params *params)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(cdev, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(hwfn, &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(hwfn, *handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}
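
/* Bring up the cdev-level LL2 connection: preallocate and post Rx buffers,
 * acquire and establish the connection, then install the MAC filter.
 */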
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(cdev, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &data);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(cdev, params);
		if (rc) {
			DP_INFO(cdev,
				"Failed to initialize the OOO LL2 queue\n");
			goto release_terminate;
		}
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}
static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
	    cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
		qed_ll2_stop_ooo(cdev);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}
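
/* Transmit an skb over the cdev-level LL2 connection: map the linear part
 * as the first BD and each page fragment as an additional BD.
 */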
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache number of fragments from SKB since SKB may be freed by
	 * the completion routine after calling qed_ll2_prepare_tx_packet()
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if
	 * there are no fragments in the skb and subsequently the completion
	 * routine may run and free the SKB, so no dereferencing the SKB
	 * beyond this point unless skb has any fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -ENOMEM;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been posted
		 * we can't free memory, will need to wait for completion.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);

err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}