2 * Copyright (c) 2013 Qualcomm Atheros, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted (subject to the limitations in the
7 * disclaimer below) provided that the following conditions are met:
9 * * Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
17 * * Neither the name of Qualcomm Atheros nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
22 * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
23 * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
32 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
33 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <dma_engine_api.h>
38 #include <Magpie_api.h>
40 #include <Magpie_api.h>
/***********************Constants***************************/

/**
 * @brief Descriptor specific bitmaps
 *
 * Ownership field of a DMA descriptor's status word.  Software owns a
 * descriptor while the field reads OWN_DRV; the DMA engine owns it
 * while OWN_DMA is set (__dma_flush_xmit() reclaims descriptors by
 * writing OWN_DRV back).
 */
enum __dma_desc_status{
    DMA_STATUS_OWN_DRV = 0x0, /* owned by the driver / software */
    DMA_STATUS_OWN_DMA = 0x1, /* owned by the DMA hardware */
    DMA_STATUS_OWN_MSK = 0x3  /* mask covering the ownership field */
};
/**
 * @brief Burst sizes usable by the RX/TX engines.
 *
 * NOTE(review): the interior of this enum and the opening of the
 * byte-swap enum were lost in extraction.  The 4W/8W/16W values are
 * reconstructed (DMA_BURST_8W / DMA_BURST_16W are referenced by the
 * init routines below) -- confirm against the original source.
 */
enum __dma_burst_size{
    DMA_BURST_4W  = 0x00, /* 4-word bursts */
    DMA_BURST_8W  = 0x01, /* 8-word bursts (used for RX) */
    DMA_BURST_16W = 0x02  /* 16-word bursts (used for TX) */
};

/**
 * @brief Byte-swap control written to the TX/RX swap registers.
 */
enum __dma_byte_swap{
    DMA_BYTE_SWAP_OFF = 0x00, /* no byte swapping */
    DMA_BYTE_SWAP_ON  = 0x01  /* swap bytes on the fly */
};
/**
 * @brief Interrupt status bits
 *
 * Bit positions inside the DMA interrupt status/mask registers
 * (DMA_REG_ISR / DMA_REG_IMR).
 *
 * NOTE(review): the closing typedef name was lost in extraction and is
 * reconstructed as __dma_intr_bits_t -- confirm.
 */
typedef enum __dma_intr_bits{
    DMA_INTR_TX1_END  = (1 << 25), /* TX1 reached the end or under-run */
    DMA_INTR_TX0_END  = (1 << 24), /* TX0 reached the end or under-run */
    DMA_INTR_TX1_DONE = (1 << 17), /* TX1 has transmitted a packet */
    DMA_INTR_TX0_DONE = (1 << 16), /* TX0 has transmitted a packet */
    DMA_INTR_RX3_END  = (1 << 11), /* RX3 reached the end or under-run */
    DMA_INTR_RX2_END  = (1 << 10), /* RX2 reached the end or under-run */
    DMA_INTR_RX1_END  = (1 << 9),  /* RX1 reached the end or under-run */
    DMA_INTR_RX0_END  = (1 << 8),  /* RX0 reached the end or under-run */
    DMA_INTR_RX3_DONE = (1 << 3),  /* RX3 received a packet */
    DMA_INTR_RX2_DONE = (1 << 2),  /* RX2 received a packet */
    DMA_INTR_RX1_DONE = (1 << 1),  /* RX1 received a packet */
    DMA_INTR_RX0_DONE = 1          /* RX0 received a packet */
}__dma_intr_bits_t;
/**
 * @brief Base addresses for various HIF
 *
 * Base of the DMA register window, selected by the host interface in
 * use.  PCI and PCIe share the generic host (HST) window.
 */
typedef enum __dma_base_off{
    DMA_BASE_OFF_HST  = 0x00053000,       /* generic host interface */
    DMA_BASE_OFF_GMAC = 0x00054000,       /* GMAC interface */
    DMA_BASE_OFF_PCI  = DMA_BASE_OFF_HST, /* PCI: same window as HST */
    DMA_BASE_OFF_PCIE = DMA_BASE_OFF_HST  /* PCIe: same window as HST */
}__dma_base_off_t;
/**
 * @brief Engine offset to add for per engine register reads or
 *        writes (added to the interface base address).
 */
typedef enum __dma_eng_off{
    DMA_ENG_OFF_RX0 = 0x800,
    DMA_ENG_OFF_RX1 = 0x900,
    DMA_ENG_OFF_RX2 = 0xa00,
    DMA_ENG_OFF_RX3 = 0xb00,
    DMA_ENG_OFF_TX0 = 0xc00,
    DMA_ENG_OFF_TX1 = 0xd00
}__dma_eng_off_t;
/**
 * @brief DMA registers
 *
 * Register offsets relative to (base + engine offset).  The same
 * numeric offsets are reused by the common block, the TX engines and
 * the RX engines, hence the duplicate enumerator values.
 *
 * NOTE(review): the closing typedef name was lost in extraction and is
 * reconstructed as __dma_reg_off_t -- confirm.
 */
typedef enum __dma_reg_off{
    /*
     * Common or Non Engine specific
     */
    DMA_REG_IFTYPE   = 0x00, /* XXX */
    DMA_REG_ISR      = 0x00, /* Interrupt Status Register */
    DMA_REG_IMR      = 0x04, /* Interrupt Mask Register */
    /*
     * TX engine registers
     */
    DMA_REG_TXDESC   = 0x00, /* TX DP (descriptor pointer) */
    DMA_REG_TXSTART  = 0x04, /* TX start */
    DMA_REG_INTRLIM  = 0x08, /* TX interrupt limit */
    DMA_REG_TXBURST  = 0x0c, /* TX burst size */
    DMA_REG_TXSWAP   = 0x18, /* TX byte swap */
    /*
     * RX engine registers
     */
    DMA_REG_RXDESC   = 0x00, /* RX DP (descriptor pointer) */
    DMA_REG_RXSTART  = 0x04, /* RX start */
    DMA_REG_RXBURST  = 0x08, /* RX burst size */
    DMA_REG_RXPKTOFF = 0x0c, /* RX packet offset */
    DMA_REG_RXSWAP   = 0x1c  /* RX byte swap */
}__dma_reg_off_t;
/*******************************Data types******************************/

/* Local shorthands for the descriptor/queue types supplied by the DMA
 * engine API (dma_engine_api.h); the struct definitions live there. */

/* One hardware DMA descriptor. */
typedef struct zsDmaDesc __dma_desc_t;

/* Receive-side descriptor queue. */
typedef struct zsDmaQueue __dma_rxq_t;

/* Transmit-side descriptor queue. */
typedef struct zsTxDmaQueue __dma_txq_t;
143 * @brief Register Address
145 typedef struct __dma_reg_addr{
146 __dma_base_off_t base;/*Base address, Fixed*/
147 __dma_eng_off_t eng;/*Engine offset, Fixed*/
151 * @brief DMA engine's Queue
153 typedef struct __dma_eng_q{
154 __dma_reg_addr_t addr;
/***********************Defines*****************************/

/**
 * @brief Designated initializer for an engine's register address.
 *        The base defaults to the HST window; __dma_set_base()
 *        overrides it at init time for other interfaces.
 */
#define DMA_ADDR_INIT(_eng) {          \
        .base = DMA_BASE_OFF_HST,      \
        .eng  = DMA_ENG_OFF_##_eng     \
}

/**
 * @brief check if the val doesn't lie between the low & high of
 *        the engine range; evaluates non-zero when out of range.
 */
#define DMA_ENG_CHECK(_val, _low, _high)    \
    ((_val) < DMA_ENGINE_##_low || (_val) > DMA_ENGINE_##_high)
178 /********************************Globals*************************************/
180 __dma_eng_q_t eng_q[DMA_ENGINE_MAX] = {
181 {.addr = DMA_ADDR_INIT(RX0)},
182 {.addr = DMA_ADDR_INIT(RX1)},
183 {.addr = DMA_ADDR_INIT(RX2)},
184 {.addr = DMA_ADDR_INIT(RX3)},
185 {.addr = DMA_ADDR_INIT(TX0)},
186 {.addr = DMA_ADDR_INIT(TX1)},
189 /**********************************API's*************************************/
192 * @brief Read the register
199 __dma_reg_read(A_UINT32 addr)
201 return *((volatile A_UINT32 *)addr);
204 * @brief Write into the register
210 __dma_reg_write(A_UINT32 addr, A_UINT32 val)
212 *((volatile A_UINT32 *)addr) = val;
215 * @brief Set the base address
221 __dma_set_base(dma_engine_t eng_no, dma_iftype_t if_type)
225 eng_q[eng_no].addr.base = DMA_BASE_OFF_GMAC;
228 eng_q[eng_no].addr.base = DMA_BASE_OFF_PCI;
231 eng_q[eng_no].addr.base = DMA_BASE_OFF_PCIE;
238 * @brief init the Transmit queue
246 __dma_lib_tx_init(dma_engine_t eng_no, dma_iftype_t if_type)
248 __dma_desc_t *head = NULL;
251 if(DMA_ENG_CHECK(eng_no, TX0, TX1))
254 DMA_Engine_init_tx_queue(&eng_q[eng_no].txq);
256 __dma_set_base(eng_no, if_type);
258 addr = eng_q[eng_no].addr.base + eng_q[eng_no].addr.eng;
260 head = eng_q[eng_no].txq.head;
262 __dma_reg_write(addr + DMA_REG_TXDESC,(A_UINT32)head);
263 __dma_reg_write(addr + DMA_REG_TXBURST, DMA_BURST_16W);
264 __dma_reg_write(addr + DMA_REG_TXSWAP, DMA_BYTE_SWAP_ON);
270 __dma_lib_rx_config(dma_engine_t eng_no, A_UINT16 num_desc,
273 __dma_desc_t *desc = NULL;
277 * Allocate the Receive Queue
279 DMA_Engine_config_rx_queue(&eng_q[eng_no].rxq, num_desc, gran);
281 desc = eng_q[eng_no].rxq.head;
282 addr = eng_q[eng_no].addr.base + eng_q[eng_no].addr.eng;
284 * Update RX queue head in the H/W, set the burst & say go
286 __dma_reg_write(addr + DMA_REG_RXDESC, (A_UINT32)desc);
287 __dma_reg_write(addr + DMA_REG_RXBURST, DMA_BURST_8W);
288 __dma_reg_write(addr + DMA_REG_RXSWAP, DMA_BYTE_SWAP_ON);
289 __dma_reg_write(addr + DMA_REG_RXSTART, DMA_BIT_SET);
294 * @brief Initialize the DMA engine
301 __dma_lib_rx_init(dma_engine_t eng_no, dma_iftype_t if_type)
303 if(DMA_ENG_CHECK(eng_no, RX0, RX3))
307 * XXX:The init can be called multiple times to setup different
308 * geometries of descriptors
310 DMA_Engine_init_rx_queue(&eng_q[eng_no].rxq);
312 __dma_set_base(eng_no, if_type);
317 * @brief Transmit VBUF for the specified engine number
324 __dma_hard_xmit(dma_engine_t eng_no, VBUF *vbuf)
328 addr = eng_q[eng_no].addr.base + eng_q[eng_no].addr.eng;
330 DMA_Engine_xmit_buf(&eng_q[eng_no].txq, vbuf);
334 __dma_reg_write(addr + DMA_REG_TXSTART, DMA_BIT_SET);
337 * @brief return a VBUF for the specified engine number
344 __dma_reap_xmitted(dma_engine_t eng_no)
346 return DMA_Engine_reap_xmited_buf(&eng_q[eng_no].txq);
349 * @brief flush all xmitted & to be xmitted (if you have the
350 * window) dudes from H/W
355 __dma_flush_xmit(dma_engine_t eng_no)
358 __dma_desc_t *desc, *term;
360 addr = eng_q[eng_no].addr.base + eng_q[eng_no].addr.eng;
362 desc = eng_q[eng_no].txq.head;
363 term = eng_q[eng_no].txq.terminator;
366 * XXX: I don't know how to kick the all dudes out, Ideally
367 * there should be a DMA reset button (the red one)
369 __dma_reg_write(addr + DMA_REG_TXSTART, DMA_BIT_CLEAR);
370 __dma_reg_write(addr + DMA_REG_TXDESC,(A_UINT32)term);
373 * Make the H/W queue ready for TX reap
375 for(;desc != term; desc = desc->nextAddr)
376 desc->status = DMA_STATUS_OWN_DRV;
378 // DMA_Engine_flush_xmit(&eng_q[eng_no].txq);
381 * @brief check if there are xmitted vbufs (dudes) hanging
389 __dma_xmit_done(dma_engine_t eng_no)
391 if(DMA_ENG_CHECK(eng_no, TX0, TX1))
394 return DMA_Engine_has_compl_packets(&eng_q[eng_no].txq);
397 * @brief Reap VBUF's from the specified engine number
404 __dma_reap_recv(dma_engine_t eng)
406 return DMA_Engine_reap_recv_buf(&eng_q[eng].rxq);
409 * @brief return to source, put the vbuf back into the queue, In
410 * case the Engine is stopped so start it again
416 __dma_return_recv(dma_engine_t eng_no, VBUF *vbuf)
420 addr = eng_q[eng_no].addr.base + eng_q[eng_no].addr.eng;
422 DMA_Engine_return_recv_buf(&eng_q[eng_no].rxq, vbuf);
424 __dma_reg_write(addr + DMA_REG_RXSTART, DMA_BIT_SET);
427 * @brief check if there are freshly arrived vbufs (dudes)
434 __dma_recv_pkt(dma_engine_t eng_no)
436 if(DMA_ENG_CHECK(eng_no, RX0, RX3))
439 return DMA_Engine_has_compl_packets(&eng_q[eng_no].rxq);
443 dma_lib_module_install(struct dma_lib_api *apis)
445 apis->tx_init = __dma_lib_tx_init;
446 apis->rx_init = __dma_lib_rx_init;
447 apis->rx_config = __dma_lib_rx_config;
448 apis->hard_xmit = __dma_hard_xmit;
449 apis->flush_xmit = __dma_flush_xmit;
450 apis->xmit_done = __dma_xmit_done;
451 apis->reap_recv = __dma_reap_recv;
452 apis->reap_xmitted = __dma_reap_xmitted;
453 apis->return_recv = __dma_return_recv;
454 apis->recv_pkt = __dma_recv_pkt;