drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c (GNU Linux-libre 4.14.290-gnu1)
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2015, Applied Micro Circuits Corporation
 * Author: Iyappan Subramanian <isubramanian@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_ring2.h"

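/* Program the ring state words: select the interrupt line and enable the
 * dequeue interrupt for CPU-owned rings, and set the coherent ring base
 * address (split across RINGADDRL/RINGADDRH), ring size, threshold select
 * and address modes.
 */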
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
        u32 *ring_cfg = ring->state;
        u64 addr = ring->dma;

        if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
                ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK);
                ring_cfg[3] |= SET_BIT(X2_DEQINTEN);
        }
        ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2);

        addr >>= 8;
        ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr);

        addr >>= 27;
        ring_cfg[3] |= SET_VAL(RINGSIZE, ring->cfgsize)
                    | ACCEPTLERR
                    | SET_VAL(RINGADDRH, addr);
        ring_cfg[4] |= SET_VAL(X2_SELTHRSH, 1);
        ring_cfg[5] |= SET_BIT(X2_QBASE_AM) | SET_BIT(X2_MSG_AM);
}

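/* Mark the ring as either a buffer pool or a regular ring, and select
 * buffer pool mode for buffer pool rings.
 */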
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
        u32 *ring_cfg = ring->state;
        bool is_bufpool;
        u32 val;

        is_bufpool = xgene_enet_is_bufpool(ring->id);
        val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
        ring_cfg[4] |= SET_VAL(X2_RINGTYPE, val);
        if (is_bufpool)
                ring_cfg[3] |= SET_VAL(RINGMODE, BUFPOOL_MODE);
}

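/* Enable the recombination buffer and program its timeout. */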
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
        u32 *ring_cfg = ring->state;

        ring_cfg[3] |= RECOMBBUF;
        ring_cfg[4] |= SET_VAL(X2_RECOMTIMEOUT, 0x7);
}

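/* Write a 32-bit value into the ring CSR region at the given offset. */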
static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
                                 u32 offset, u32 data)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

        iowrite32(data, pdata->ring_csr_addr + offset);
}

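/* Select the ring via CSR_RING_CONFIG and write out all of its state words. */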
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
        struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
        int i;

        xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
        for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
                xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
                                     ring->state[i]);
        }
}

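/* Zero the cached ring state and propagate the cleared state to hardware. */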
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
        memset(ring->state, 0, sizeof(ring->state));
        xgene_enet_write_ring_state(ring);
}

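/* Build the ring state: set the ring type, enable the recombination buffer
 * for Ethernet-owned rings, fill in the base configuration and write it out.
 */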
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
        enum xgene_ring_owner owner;

        xgene_enet_ring_set_type(ring);

        owner = xgene_enet_ring_owner(ring->id);
        if (owner == RING_OWNER_ETH0 || owner == RING_OWNER_ETH1)
                xgene_enet_ring_set_recombbuf(ring);

        xgene_enet_ring_init(ring);
        xgene_enet_write_ring_state(ring);
}

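/* Associate the ring id with the ring number: program the 10-bit ring id
 * (with OVERWRITE) and the ring number with the prefetch buffer enabled,
 * flagging buffer pool rings. CPU-owned rings are left untouched.
 */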
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
        u32 ring_id_val, ring_id_buf;
        bool is_bufpool;

        if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)
                return;

        is_bufpool = xgene_enet_is_bufpool(ring->id);

        ring_id_val = ring->id & GENMASK(9, 0);
        ring_id_val |= OVERWRITE;

        ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
        ring_id_buf |= PREFETCH_BUF_EN;

        if (is_bufpool)
                ring_id_buf |= IS_BUFFER_POOL;

        xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
        xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

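/* Dissociate the ring id from its ring number in hardware. */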
static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
        u32 ring_id;

        ring_id = ring->id | OVERWRITE;
        xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
        xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

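/* Configure a descriptor ring: program its state and id and compute its
 * number of slots. For CPU-owned work rings, also program the interrupt
 * mailbox address and mark every descriptor slot empty.
 */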
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
                                    struct xgene_enet_desc_ring *ring)
{
        bool is_bufpool;
        u32 addr, i;

        xgene_enet_clr_ring_state(ring);
        xgene_enet_set_ring_state(ring);
        xgene_enet_set_ring_id(ring);

        ring->slots = xgene_enet_get_numslots(ring->id, ring->size);

        is_bufpool = xgene_enet_is_bufpool(ring->id);
        if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
                return ring;

        addr = CSR_VMID0_INTR_MBOX + (4 * (ring->id & RING_BUFNUM_MASK));
        xgene_enet_ring_wr32(ring, addr, ring->irq_mbox_dma >> 10);

        for (i = 0; i < ring->slots; i++)
                xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

        return ring;
}

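/* Undo ring setup: clear the ring id association and the ring state. */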
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
        xgene_enet_clr_desc_ring_id(ring);
        xgene_enet_clr_ring_state(ring);
}

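/* Post 'count' (masked to 17 bits) to the ring command register; for
 * CPU-owned rings the same write also clears the interrupt pending on the
 * ring's interrupt line.
 */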
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
        u32 data = 0;

        if (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU) {
                data = SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK) |
                       INTR_CLEAR;
        }
        data |= (count & GENMASK(16, 0));

        iowrite32(data, ring->cmd);
}

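/* Return the number of messages currently queued on the ring, as reported
 * by the ring state read from the command base registers.
 */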
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
        u32 __iomem *cmd_base = ring->cmd_base;
        u32 ring_state, num_msgs;

        ring_state = ioread32(&cmd_base[1]);
        num_msgs = GET_VAL(X2_NUMMSGSINQ, ring_state);

        return num_msgs;
}

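/* Program interrupt coalescing: the PBM coalescing control, the four CTICK
 * registers and the two SET1 threshold registers.
 */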
static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
        u32 data = 0x77777777;

        xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
        xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data);
        xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
        xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data);
        xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data);
        xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08);
        xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10);
}

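/* Ring operations for the second generation (ring2/X2) ring manager. */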
struct xgene_ring_ops xgene_ring2_ops = {
        .num_ring_config = X2_NUM_RING_CONFIG,
        .num_ring_id_shift = 13,
        .setup = xgene_enet_setup_ring,
        .clear = xgene_enet_clear_ring,
        .wr_cmd = xgene_enet_wr_cmd,
        .len = xgene_enet_ring_len,
        .coalesce = xgene_enet_setup_coalescing,
};