/* QLogic qed NIC Driver
 *
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registred)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registred)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;
	bool frags_mapped;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};
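
/* Tx-completion callback used when LL2 serves the kernel protocol drivers:
 * the cookie registered at transmit time is the originating sk_buff, so
 * completion only needs to undo the head DMA mapping, invoke the client's
 * tx_cb and free the skb.
 */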
static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	if (cdev->ll2->frags_mapped)
		/* Case where mapped frags were received, need to
		 * free skb with nr_frags marked as 0
		 */
		skb_shinfo(skb)->nr_frags = 0;

	dev_kfree_skb_any(skb);
}
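
/* Allocate and DMA-map a single Rx buffer. GFP_ATOMIC is used since this
 * also runs from the Rx-completion path, where sleeping is not allowed.
 */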
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}
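
/* Rx completion in the protocol-driver mode: unmap the filled buffer, wrap
 * it in an skb via build_skb() and pass it to the registered rx_cb. A
 * replacement buffer is allocated first; on failure (or a runt frame) the
 * original buffer is simply reposted to the firmware and the frame dropped.
 */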
static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
					u8 connection_handle,
					struct qed_ll2_rx_packet *p_pkt,
					struct core_rx_fast_path_cqe *p_cqe,
					bool b_last_packet)
{
	u16 packet_length = le16_to_cpu(p_cqe->packet_length);
	struct qed_ll2_buffer *buffer = p_pkt->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 vlan = le16_to_cpu(p_cqe->vlan);
	u32 opaque_data_0, opaque_data_1;
	u8 pad = p_cqe->placement_offset;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
	opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);

	DP_VERBOSE(cdev,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)p_pkt->rx_buf_addr, pad, packet_length,
		   le16_to_cpu(p_cqe->parse_flags.flags), vlan,
		   opaque_data_0, opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, packet_length, false);
	}

	/* Determine if data is valid */
	if (packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		rc = -ENOMEM;
		goto out_post;
	}

	pad += NET_SKB_PAD;
	skb_reserve(skb, pad);
	skb_put(skb, packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      opaque_data_0, opaque_data_1);
	}

	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);

	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}
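
/* Flush any Tx packets still listed as in-flight, completing them back to
 * their owners as if the firmware had consumed them. Only legal once the
 * connection is no longer active.
 */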
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		p_tx->cur_completing_packet = *p_pkt;
		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->gsi_enable)
			qed_ll2b_release_tx_gsi_packet(p_hwfn,
						       p_ll2_conn->my_id,
						       p_pkt->cookie,
						       tx_frag,
						       b_last_frag,
						       b_last_packet);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag,
						    b_last_packet);
	}
}
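
/* Status-block callback for the Tx queue: consume BDs up to the consumer
 * index the firmware wrote to the status block, completing whole packets
 * only. The queue spinlock is dropped around each per-packet upcall.
 */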
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	dma_addr_t tx_frag;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);
		tx_frag = p_pkt->bds_set[0].tx_frag;
		if (p_ll2_conn->gsi_enable)
			qed_ll2b_complete_tx_gsi_packet(p_hwfn,
							p_ll2_conn->my_id,
							p_pkt->cookie,
							tx_frag,
							b_last_frag, !num_bds);
		else
			qed_ll2b_complete_tx_packet(p_hwfn,
						    p_ll2_conn->my_id,
						    p_pkt->cookie,
						    tx_frag,
						    b_last_frag, !num_bds);
		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}
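
/* Rx completion of a GSI (RoCE CM) CQE: extract the parsed source MAC,
 * vlan and length fields and hand the packet to the RoCE layer. Called
 * with the Rx lock held; the lock is released around the upcall.
 */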
static int
qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_info *p_ll2_info,
			   union core_rx_cqe_union *p_cqe,
			   unsigned long lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u16 packet_length, parse_flags, vlan;
	u32 src_mac_addrhi;
	u16 src_mac_addrlo;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "GSI Rx completion but active_descq is empty\n");
		return -EIO;
	}

	list_del(&p_pkt->list_entry);
	parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
	qed_ll2b_complete_rx_gsi_packet(p_hwfn,
					p_ll2_info->my_id,
					p_pkt->cookie,
					p_pkt->rx_buf_addr,
					packet_length,
					p_cqe->rx_cqe_gsi.data_length_error,
					parse_flags,
					vlan,
					src_mac_addrhi,
					src_mac_addrlo, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, lock_flags);

	return 0;
}

static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
				      struct qed_ll2_info *p_ll2_conn,
				      union core_rx_cqe_union *p_cqe,
				      unsigned long *p_lock_flags,
				      bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "LL2 Rx completion but active_descq is empty\n");
		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");
	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}
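
/* Status-block callback for the Rx queue: consume CQEs from the Rx
 * completion chain until the firmware's consumer index is reached,
 * dispatching each CQE by type (regular vs. GSI offload).
 */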
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		b_last_cqe = (cq_new_idx == cq_old_idx);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
			rc = -EINVAL;
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
			rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
							cqe, flags, b_last_cqe);
			break;
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
							cqe, &flags,
							b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	while (!list_empty(&p_rx->active_descq)) {
		dma_addr_t rx_buf_addr;
		void *cookie;
		bool b_last;

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);

		rx_buf_addr = p_pkt->rx_buf_addr;
		cookie = p_pkt->cookie;

		b_last = list_empty(&p_rx->active_descq);
	}
}
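
/* The qed_sp_ll2_* helpers below build slowpath 'ramrod' requests against
 * the CORE protocol and post them on the slowpath queue; the firmware
 * starts/stops the actual queues in response. All of them use
 * QED_SPQ_MODE_EBLOCK, i.e. they block until the completion arrives.
 */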
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base,
		       p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = 1;

	if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
	    p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	union qed_qm_pq_params pq_params;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
	p_ll2_conn->tx_stats_en = 1;
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = p_ll2_conn->tx_tc;
	pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_ISCSI:
	case QED_LL2_TYPE_ISCSI_OOO:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
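
/* Per-connection Rx resources: a NEXT_PTR BD chain the driver posts
 * buffers into, a shadow array of packet descriptors, and a PBL-based
 * completion-queue chain that the firmware fills with CQEs.
 */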
static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn_type, rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info,
					 u16 tx_num_desc)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_array = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->conn_type, tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  tx_num_desc);
	return rc;
}
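
/* Public connection API. A client is expected to follow this sequence,
 * roughly (a sketch with error handling omitted; qed_ll2_start() below is
 * the in-file example of the real thing):
 *
 *	u8 handle;
 *
 *	qed_ll2_acquire_connection(p_hwfn, &info, rx_num, tx_num, &handle);
 *	qed_ll2_establish_connection(p_hwfn, handle);
 *	qed_ll2_post_rx_buffer(p_hwfn, handle, phys, 0, cookie, 1);
 *	...
 *	qed_ll2_terminate_connection(p_hwfn, handle);
 *	qed_ll2_release_connection(p_hwfn, handle);
 */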
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_params,
			       u16 rx_num_desc,
			       u16 tx_num_desc,
			       u8 *p_connection_handle)
{
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	int rc = -EINVAL;
	u8 i;

	if (!p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	p_ll2_info->conn_type = p_params->conn_type;
	p_ll2_info->mtu = p_params->mtu;
	p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
	p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
	p_ll2_info->tx_tc = p_params->tx_tc;
	p_ll2_info->tx_dest = p_params->tx_dest;
	p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
	p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
	p_ll2_info->gsi_enable = p_params->gsi_enable;

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	comp_rx_cb = qed_ll2_rxq_completion;
	comp_tx_cb = qed_ll2_txq_completion;

	if (rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registred = true;
	}

	if (tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registred = true;
	}

	*p_connection_handle = i;

	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);

	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
		  p_ll2_conn->ai_err_packet_too_big);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
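
/* Second stage of bring-up: reset the chains and descriptor lists, acquire
 * a CORE-protocol CID, derive the producer/doorbell addresses from the
 * absolute queue id, and fire the Rx/Tx start ramrods.
 */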
int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	int rc = -EINVAL;
	u32 i, capacity;
	u8 qid;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_tx->descq_array[i].list_entry,
			      &p_tx->free_descq);
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);

	return rc;
}

static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);

	/* Make sure chain element is updated before ringing the doorbell */
	dma_wmb();

	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
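
/* Post one Rx buffer (by bus address) to the connection. With notify_fw
 * set, the new producer values are written to the TSTORM producer address
 * immediately; otherwise the descriptor is parked on posting_descq and
 * flushed together with a later, notifying post.
 */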
int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  u8 num_of_bds,
					  dma_addr_t first_frag,
					  u16 first_frag_len, void *p_cookie,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = p_cookie;
	p_curp->bd_used = num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
	p_tx->cur_send_frag_num++;
}

static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_info *p_ll2,
					     struct qed_ll2_tx_packet *p_curp,
					     u8 num_of_bds,
					     enum core_tx_dest tx_dest,
					     u16 vlan,
					     u8 bd_flags,
					     u16 l4_hdr_offset_w,
					     enum core_roce_flavor_type type,
					     dma_addr_t first_frag,
					     u16 first_frag_len)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	u16 frag_idx;

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	start_bd->bd_flags.as_bitfield = bd_flags;
	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
	DMA_REGPAIR_LE(start_bd->addr, first_frag);
	start_bd->nbytes = cpu_to_le16(first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->conn_type,
		   prod_idx,
		   first_frag_len,
		   num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_flags.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		(*p_bd)->bitfield0 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct core_db_data db_msg = { 0, 0, 0 };
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
}
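
/* Queue a packet for transmission. Only the first fragment is described
 * here; for a multi-BD packet the caller supplies the remaining fragments
 * via qed_ll2_set_fragment_of_tx_packet(), and the doorbell fires once the
 * last expected BD is filled. Roughly (a sketch, mirroring start_xmit):
 *
 *	qed_ll2_prepare_tx_packet(p_hwfn, handle, 1 + nr_frags, vlan, flags,
 *				  0, 0, head_mapping, head_len, skb, 1);
 *	for each fragment:
 *		qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
 *						  frag_mapping, frag_len);
 */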
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
			      u8 connection_handle,
			      u8 num_of_bds,
			      u16 vlan,
			      u8 bd_flags,
			      u16 l4_hdr_offset_w,
			      enum qed_ll2_roce_flavor_type qed_roce_flavor,
			      dma_addr_t first_frag,
			      u16 first_frag_len, void *cookie, u8 notify_fw)
{
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	enum core_roce_flavor_type roce_flavor;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	if (qed_roce_flavor == QED_LL2_ROCE) {
		roce_flavor = CORE_ROCE;
	} else if (qed_roce_flavor == QED_LL2_RROCE) {
		roce_flavor = CORE_RROCE;
	} else {
		rc = -EINVAL;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
				      num_of_bds, first_frag,
				      first_frag_len, cookie, notify_fw);
	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
					 num_of_bds, CORE_TX_DEST_NW,
					 vlan, bd_flags, l4_hdr_offset_w,
					 roce_flavor,
					 first_frag, first_frag_len);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}
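
/* Tear-down mirrors bring-up: stop the firmware queues via the stop
 * ramrods, then flush whatever descriptors are still outstanding.
 */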
int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_txq_flush(p_hwfn, connection_handle);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			return rc;
		qed_ll2_rxq_flush(p_hwfn, connection_handle);
	}

	return rc;
}

void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registred = false;
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	kfree(p_ll2_conn->tx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return NULL;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	return p_ll2_connections;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn,
		   struct qed_ll2_info *p_ll2_connections)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_ll2_connections[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn,
		  struct qed_ll2_info *p_ll2_connections)
{
	kfree(p_ll2_connections);
}
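
/* Per-queue statistics live in the storm RAMs (TSTORM: Rx discards,
 * USTORM: Rx byte/packet counters, PSTORM: Tx counters). Each helper
 * copies its block out through the PTT window and folds the 64-bit
 * HI/LO register pairs into the exported counters.
 */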
static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard =
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}
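
/* qed_ll2_ops entry points exported through qed_ll2_ops_pass. qed_ll2_start()
 * implements the typical client flow: pick a connection type based on the
 * device personality, pre-allocate QED_LL2_RX_SIZE receive buffers, bring
 * the connection up on the leading hwfn, post the buffers and install an
 * LLH MAC filter for the client's address.
 */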
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	struct qed_ll2_info ll2_info;
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	enum qed_ll2_conn_type conn_type;
	struct qed_ptt *p_ptt;
	int rc, i;

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);
	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;
	cdev->ll2->frags_mapped = params->frags_mapped;

	/* Allocate memory for LL2 */
	DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
		cdev->ll2->rx_size);
	for (i = 0; i < QED_LL2_RX_SIZE; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			goto fail;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto fail;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	/* Prepare the temporary ll2 information */
	memset(&ll2_info, 0, sizeof(ll2_info));
	ll2_info.conn_type = conn_type;
	ll2_info.mtu = params->mtu;
	ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;
	ll2_info.gsi_enable = 1;

	rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
					QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
					&cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to acquire LL2 connection\n");
		goto fail;
	}

	rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc) {
		DP_INFO(cdev, "Failed to establish LL2 connection\n");
		goto release_fail;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
					    cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(cdev,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			cdev->ll2->rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (!cdev->ll2->rx_cnt) {
		DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
		goto release_terminate;
	}

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_INFO(cdev, "Invalid Ethernet address\n");
		goto release_terminate;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto release_terminate;
	}

	rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				    params->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	if (rc) {
		DP_ERR(cdev, "Failed to allocate LLH filter\n");
		goto release_terminate_all;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

release_terminate_all:

release_terminate:
	qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
release_fail:
	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
fail:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return -EINVAL;
}

static int qed_ll2_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt) {
		DP_INFO(cdev, "Failed to acquire PTT\n");
		goto fail;
	}

	qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
				  cdev->ll2_mac_address);
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
	eth_zero_addr(cdev->ll2_mac_address);

	rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
					  cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_kill_buffers(cdev);

	qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc;
fail:
	return -EINVAL;
}
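
/* Transmit a single skb over the LL2 connection. The linear part is always
 * mapped here; page fragments are mapped per-frag unless the client set
 * frags_mapped, in which case their pages are assumed to be DMA-able as-is.
 */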
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
{
	const skb_frag_t *frag;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;
	u8 flags = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + skb_shinfo(skb)->nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
	}

	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
				       cdev->ll2->handle,
				       1 + skb_shinfo(skb)->nr_frags,
				       vlan, flags, 0, 0 /* RoCE FLAVOR */,
				       mapping, skb->len, skb, 1);
	if (rc)
		goto err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		if (!cdev->ll2->frags_mapped) {
			mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(&cdev->pdev->dev,
						       mapping))) {
				DP_NOTICE(cdev,
					  "Unable to map frag - dropping packet\n");
				rc = -ENOMEM;
				goto err;
			}
		} else {
			mapping = page_to_phys(skb_frag_page(frag)) |
			    frag->page_offset;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* if failed not much to do here, partial packet has been posted
		 * we can't free memory, will need to wait for completion.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}

static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	if (!cdev->ll2)
		return -EINVAL;

	return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
				 cdev->ll2->handle, stats);
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}