/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 **********************************************************************/
/*! \file octeon_network.h
 * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS			0x01
#define LIO_IFSTATE_REGISTERED			0x02
#define LIO_IFSTATE_RUNNING			0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED	0x08
#define LIO_IFSTATE_RESETTING			0x10
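
/* Usage sketch: these bits live in the atomic lio->ifstate word and are
 * tested and updated through the ifstate_check()/ifstate_set()/
 * ifstate_reset() helpers defined later in this file, e.g.
 *
 *	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *		return;		(skip Rx/Tx while the interface is down)
 */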
struct liquidio_if_cfg_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

#define LIO_IFCFG_WAIT_TIME	3000 /* In milliseconds */
/* Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/* List manipulation. Next and prev pointers. */
	struct list_head list;

	/* Size of the gather component at sg in bytes. */
	int sg_size;

	/* Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/* Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};
struct oct_nic_stats_resp {
	u64 rh;
	struct oct_link_stats stats;
	u64 status;
};

struct oct_nic_stats_ctrl {
	struct completion complete;
	struct net_device *netdev;
};
struct oct_nic_seapi_resp {
	u64 rh;
	u32 speed;
	u64 status;
};

struct liquidio_nic_seapi_ctl_context {
	int octeon_id;
	u32 status;
	struct completion complete;
};
/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
	atomic_t ifstate;

	/** Octeon Interface index number. This device will be represented as
	 * oct<ifidx> in the system.
	 */
	int ifidx;

	/** Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/** Octeon Output queue from which pkts arrive
	 * for this network interface.
	 */
	int rxq;

	/** Guards each glist */
	spinlock_t *glist_lock;

	/** Array of gather component linked lists */
	struct list_head *glist;
	void **glists_virt_base;
	dma_addr_t *glists_dma_base;
	u32 glist_entry_size;

	/** Pointer to the NIC properties for the Octeon device this network
	 * interface is associated with.
	 */
	struct octdev_props *octprops;

	/** Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/** Link information sent by the core application for this interface. */
	struct oct_link_info linfo;

	/** counter of link changes */
	u64 link_changes;

	/** Size of Tx queue for this octeon device. */
	u32 tx_qsize;

	/** Size of Rx queue for this octeon device. */
	u32 rx_qsize;

	/** Size of MTU for this octeon device. */
	u32 mtu;

	/** msg level flag per interface. */
	u32 msg_enable;

	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, Checksums for this device for Kernel
	 * TSO & TSO6 feature support
	 */
	u64 enc_dev_capability;

	/** Copy of beacon reg in phy */
	u32 phy_beacon_val;

	/** Copy of ctrl reg in phy */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	s64 ptp_adjust;

	/* for atomic access to Octeon PTP reg and data struct */
	spinlock_t ptp_lock;

	/* Interface info */
	u32 intf_open;

	/* work queue for txq status */
	struct cavium_wq txq_status_wq;

	/* work queue for rxq oom status */
	struct cavium_wq rxq_status_wq;

	/* work queue for link status */
	struct cavium_wq link_status_wq;

	/* work queue to regularly send local time to octeon firmware */
	struct cavium_wq sync_octeon_time_wq;

	int netdev_uc_count;
};
#define LIO_SIZE	(sizeof(struct lio))
#define GET_LIO(netdev)	((struct lio *)netdev_priv(netdev))

#define LIO_MAX_CORES	16
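
/* Usage sketch: struct lio lives in the netdev private area, so any handler
 * that receives a net_device can recover both it and the underlying Octeon
 * device (this is the pattern wake_txqs() below uses):
 *
 *	struct lio *lio = GET_LIO(netdev);
 *	struct octeon_device *oct = lio->oct_dev;
 */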
/**
 * \brief Enable or disable feature
 * @param netdev  pointer to network device
 * @param cmd     Command that just requires acknowledgment
 * @param param1  Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);
/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
				       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

int octnet_get_link_stats(struct net_device *netdev);

int lio_wait_for_clean_oq(struct octeon_device *oct);
/**
 * \brief Register ethtool operations
 * @param netdev pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);
void lio_if_cfg_callback(struct octeon_device *oct,
			 u32 status __attribute__((unused)),
			 void *buf);

void lio_delete_glists(struct lio *lio);

int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);

int liquidio_get_speed(struct lio *lio);
int liquidio_set_speed(struct lio *lio, int speed);
/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
#define LIO_CHANGE_MTU_SUCCESS	1
#define LIO_CHANGE_MTU_FAIL	2
#define SKB_ADJ_MASK	0x3F
#define SKB_ADJ		(SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE	256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ	2048
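
/* Worked example: SKB_ADJ_MASK of 0x3F makes SKB_ADJ 64, i.e. receive
 * buffer data is aligned to a 64-byte boundary. If dev_alloc_skb() returns
 * data whose low bits are 0x28, the allocators below reserve
 * r = 0x40 - 0x28 = 24 bytes so that skb->data lands on the next 64-byte
 * boundary. LIO_RXBUFFER_SZ is half of a 4 KB page, which is what lets
 * recv_buffer_recycle() carve two Rx buffers out of each mapped page.
 */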
static inline void
*recv_buffer_alloc(struct octeon_device *oct,
		   struct octeon_skb_page_info *pg_info)
{
	struct page *page;
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	page = alloc_page(GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		__free_page(page);
		pg_info->page = NULL;
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	/* Get DMA info */
	pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
				    PAGE_SIZE, DMA_FROM_DEVICE);

	/* Mapping failed!! */
	if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
		__free_page(page);
		dev_kfree_skb_any((struct sk_buff *)skb);
		pg_info->page = NULL;
		return NULL;
	}

	pg_info->page = page;
	pg_info->page_offset = 0;
	skb_pg_info->page = page;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = pg_info->dma;

	return (void *)skb;
}
static inline void
*recv_buffer_fast_alloc(u32 size)
{
	struct sk_buff *skb;
	struct octeon_skb_page_info *skb_pg_info;

	skb = dev_alloc_skb(size + SKB_ADJ);
	if (unlikely(!skb))
		return NULL;

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = NULL;
	skb_pg_info->page_offset = 0;
	skb_pg_info->dma = 0;

	return skb;
}
static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to other half of the buffer */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}
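
/* Design note: each mapped Rx page is treated as two LIO_RXBUFFER_SZ
 * halves. On recycle the DMA mapping is kept, page_offset flips to the
 * other half, and an extra page reference is taken; a page that has other
 * outstanding references, or that sits on a remote NUMA node, is unmapped
 * and dropped instead of being recycled.
 */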
static inline void
*recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
	struct sk_buff *skb;

	skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
	if (unlikely(!skb)) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		return NULL;
	}

	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

		skb_reserve(skb, r);
	}

	skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	skb_pg_info->page = pg_info->page;
	skb_pg_info->page_offset = pg_info->page_offset;
	skb_pg_info->dma = pg_info->dma;

	return skb;
}
static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	put_page(pg_info->page);
	pg_info->dma = 0;
	pg_info->page = NULL;
	pg_info->page_offset = 0;

	if (skb)
		dev_kfree_skb_any(skb);
}
static inline void recv_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));

	if (pg_info->page) {
		put_page(pg_info->page);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
	}

	dev_kfree_skb_any((struct sk_buff *)buffer);
}
static inline void
recv_buffer_fast_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
	dev_kfree_skb_any((struct sk_buff *)buffer);
}
#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
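
/* Usage sketch (hypothetical buffer, not from this header): allocate a
 * coherent region for a command buffer and release it when done:
 *
 *	dma_addr_t dma;
 *	void *buf = lio_dma_alloc(oct, PAGE_SIZE, &dma);
 *
 *	if (buf)
 *		lio_dma_free(oct, PAGE_SIZE, buf, dma);
 */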
static inline
void *get_rbd(struct sk_buff *skb)
{
	struct octeon_skb_page_info *pg_info;
	unsigned char *va;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	va = page_address(pg_info->page) + pg_info->page_offset;

	return va;
}
static inline u64
lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;

	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* Get DMA info */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
	       u64 buf_ptr)
{
	dma_unmap_page(&pci_dev->dev,
		       buf_ptr, (PAGE_SIZE << 0),
		       DMA_FROM_DEVICE);
}
static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
					   struct sk_buff *nicbuf,
					   int copy_len,
					   int idx)
{
	skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
		     copy_len);
}
/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}
/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}
/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
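
/* Usage sketch (condensed, names as used elsewhere in the driver): state
 * transitions pair these helpers, e.g. around a reconfiguration:
 *
 *	ifstate_set(lio, LIO_IFSTATE_RESETTING);
 *	... reconfigure queues ...
 *	ifstate_reset(lio, LIO_IFSTATE_RESETTING);
 */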
/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
		pcount = atomic_read(
			&oct->response_list[OCTEON_ORDERED_SC_LIST]
			 .pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}
/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void stop_txqs(struct net_device *netdev)
{
	int i;

	for (i = 0; i < netdev->real_num_tx_queues; i++)
		netif_stop_subqueue(netdev, i);
}
/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void wake_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i, qno;

	for (i = 0; i < netdev->real_num_tx_queues; i++) {
		qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

		if (__netif_subqueue_stopped(netdev, i)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, i);
		}
	}
}
/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void start_txqs(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	int i;

	if (lio->linfo.link.s.link_up) {
		for (i = 0; i < netdev->real_num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	}
}
static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
	return skb->queue_mapping % oct->num_iqs;
}
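
/* Usage sketch (condensed from a typical xmit path): the stack's queue
 * mapping is folded onto the input queues actually allocated, and the PCI
 * queue number comes from the interface's txpciq table:
 *
 *	q_idx = skb_iq(lio->oct_dev, skb);
 *	iq_no = lio->linfo.txpciq[q_idx].s.q_no;
 */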
/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *lio_list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if (root->prev == root && root->next == root)
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}

#endif