/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/

/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source
 *  files and provides common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif

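/* Example (illustrative, not from the original source): CVM_CAST64 exists
 * so 64-bit counters can be printed with the %lld format specifier on both
 * 32-bit and 64-bit kernels; "stats" below is a placeholder:
 *
 *	dev_info(&oct->pci_dev->dev, "tx_packets: %lld\n",
 *		 CVM_CAST64(stats->tx_packets));
 */
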
#define DRV_NAME "LiquidIO"

/** This structure is used by the NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-47. Piggybacked soft command, if any */
	struct octeon_soft_command *sc;
};

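/* Example (illustrative sketch, not driver API): the transmit path can
 * stash this structure in skb->cb before posting a command, and the
 * completion handler reads it back to release resources:
 *
 *	struct octnet_buf_free_info *finfo;
 *
 *	finfo = (struct octnet_buf_free_info *)skb->cb;
 *	finfo->lio = lio;
 *	finfo->skb = skb;
 *	finfo->sc = NULL;
 */
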
/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);

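/* Example (illustrative): callers pair these helpers around packet
 * submission and completion; REQTYPE_NORESP_NET and the variables shown
 * are assumptions based on typical usage, not definitions from this file:
 *
 *	octeon_report_sent_bytes_to_bql(skb, REQTYPE_NORESP_NET);
 *	...
 *	octeon_update_tx_completion_counters(buf, reqtype,
 *					     &pkts_compl, &bytes_compl);
 *	octeon_report_tx_completion_to_bql(txq, pkts_compl, bytes_compl);
 */
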
/** Swap 8B blocks */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}

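/* Example (illustrative): convert a buffer of 8-byte words to big-endian
 * in place before interpreting it; "resp" and "resp_size" are placeholders:
 *
 *	octeon_swap_8B_data((u64 *)resp, (resp_size + 7) / 8);
 */
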
/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		return 1;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		return 1;
	}
	oct->mmio[baridx].done = 1;

	return 0;
}

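/* Example (illustrative): probe code might map BAR0 with no length cap
 * and release it on the error path:
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))
 *		return -ENODEV;
 *	...
 *	octeon_unmap_pci_barx(oct, 0);
 */
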
static inline void *
cnnic_numa_alloc_aligned_dma(u32 size, u32 *alloc_size,
			     size_t *orig_ptr, int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES     1
	do {
		struct page *page = NULL;

		page = alloc_pages_node(numa_node, GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL, get_order(size));
		if (!page)
			break;	/* out of memory; avoid NULL in page_address() */
		ptr = (void *)page_address(page);
		if ((unsigned long)ptr & 0x07) {
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increment the size required if the first
			 * attempt failed.
			 */
			if (!retries)
				size += 7;
		}
		retries++;
	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

	*alloc_size = size;
	*orig_ptr = (unsigned long)ptr;
	if ((unsigned long)ptr & 0x07)
		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
	return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
		free_pages(orig_ptr, get_order(size))

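/* Example (illustrative) pairing of the aligned alloc/free helpers; the
 * variable names are placeholders:
 *
 *	u32 alloc_size;
 *	size_t orig;
 *	void *buf;
 *
 *	buf = cnnic_numa_alloc_aligned_dma(2048, &alloc_size, &orig,
 *					   numa_node);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	cnnic_free_aligned_dma(oct->pci_dev, buf, alloc_size, orig, 0);
 */
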
static inline void
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			/* Interrupted by a signal; stop waiting. */
			goto out;
		}
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

static inline void
sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(waitq, &we);
	while (!atomic_read(pcond)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			goto out;
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &we);
}

/* Gives up the CPU for a timeout period.
 * Check that the condition is not true before we go to sleep for a
 * timeout period.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}

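/* Example (illustrative): a command issuer typically waits on a context
 * flag that the response handler sets; "ctx" is a placeholder, and the
 * waker must pair the store with wake_up():
 *
 *	sleep_cond(&ctx->wait_queue, &ctx->cond);	(waiting thread)
 *
 *	WRITE_ONCE(ctx->cond, 1);			(completion path)
 *	wake_up(&ctx->wait_queue);
 */
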
#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif

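/* Example: ROUNDUP8(13) == 16 and ROUNDUP8(16) == 16. Note the masks are
 * 32-bit constants, so 64-bit inputs are effectively truncated to 32 bits.
 */
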
#endif /* _OCTEON_MAIN_H_ */