/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>
#include <linux/netdevice.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
        u64 ring_base;                  /* 0x00 */
        u32 ring_size;                  /* 0x08 */
        u32 pad0;
        u32 posted_index;               /* 0x10 */
        u32 pad1;
        u32 cq_index;                   /* 0x18 */
        u32 pad2;
        u32 enable;                     /* 0x20 */
        u32 pad3;
        u32 running;                    /* 0x28 */
        u32 pad4;
        u32 fetch_index;                /* 0x30 */
        u32 pad5;
        u32 error_interrupt_enable;     /* 0x38 */
        u32 pad6;
        u32 error_interrupt_offset;     /* 0x40 */
        u32 pad7;
        u32 error_status;               /* 0x48 */
        u32 pad8;
        u32 dropped_packet_count;       /* 0x50 */
        u32 pad9;
        u32 dropped_packet_count_rc;    /* 0x58 */
        u32 pad10;
};

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
        ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
        VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
        (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
        DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)

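/* Per-descriptor bookkeeping.  Bufs are allocated in blocks sized by the
 * macros above (e.g. 64 blocks of 64 entries for the 4096-descriptor maximum)
 * and chained through ->next into a ring that mirrors the descriptor ring.
 */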
struct vnic_rq_buf {
        struct vnic_rq_buf *next;
        dma_addr_t dma_addr;
        void *os_buf;
        unsigned int os_buf_index;
        unsigned int len;
        unsigned int index;
        void *desc;
        uint64_t wr_id;
};

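/* Ownership states for the receive poll path: idle, driven by NAPI, or
 * driven by busy polling.
 */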
enum enic_poll_state {
        ENIC_POLL_STATE_IDLE,
        ENIC_POLL_STATE_NAPI,
        ENIC_POLL_STATE_POLL
};

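/* Software state for one receive queue: the memory-mapped control registers,
 * the descriptor ring, the blocks of vnic_rq_buf bookkeeping entries, and the
 * to_use/to_clean cursors tracking which descriptor is posted next and which
 * completes next.
 */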
struct vnic_rq {
        unsigned int index;
        struct vnic_dev *vdev;
        struct vnic_rq_ctrl __iomem *ctrl;              /* memory-mapped */
        struct vnic_dev_ring ring;
        struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
        struct vnic_rq_buf *to_use;
        struct vnic_rq_buf *to_clean;
        void *os_buf_head;
        unsigned int pkts_outstanding;
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
        /* how many does SW own? */
        return rq->ring.desc_avail;
}

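/* The ring reserves one descriptor so that a completely full ring can be
 * distinguished from an empty one; hence the extra -1 below.
 */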
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
        /* how many does HW own? */
        return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

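/* Peek at the descriptor (and its ring index) that the next vnic_rq_post()
 * will consume.
 */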
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
        return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
        return rq->to_use->index;
}

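/* Record a newly filled buffer in the bookkeeping entry at to_use, advance
 * the cursor, and hand the descriptor to hardware.  The posted_index register
 * is only written once every VNIC_RQ_RETURN_RATE + 1 descriptors to limit
 * MMIO traffic.
 */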
static inline void vnic_rq_post(struct vnic_rq *rq,
        void *os_buf, unsigned int os_buf_index,
        dma_addr_t dma_addr, unsigned int len,
        uint64_t wrid)
{
        struct vnic_rq_buf *buf = rq->to_use;

        buf->os_buf = os_buf;
        buf->os_buf_index = os_buf_index;
        buf->dma_addr = dma_addr;
        buf->len = len;
        buf->wr_id = wrid;

        buf = buf->next;
        rq->to_use = buf;
        rq->ring.desc_avail--;

        /* Move the posted_index every nth descriptor */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE             0xf     /* keep 2^n - 1 */
#endif

        if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
                /* Adding write memory barrier prevents compiler and/or CPU
                 * reordering, thus avoiding descriptor posting before
                 * descriptor is initialized. Otherwise, hardware can read
                 * stale descriptor fields.
                 */
                wmb();
                iowrite32(buf->index, &rq->ctrl->posted_index);
        }
}

static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
        rq->ring.desc_avail += count;
}

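/* Whether vnic_rq_service() returns each descriptor to the available pool
 * immediately or leaves that to a later vnic_rq_return_descs() call.
 */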
enum desc_return_options {
        VNIC_RQ_RETURN_DESC,
        VNIC_RQ_DEFER_RETURN_DESC,
};

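/* Walk the bookkeeping ring from to_clean up to and including the buffer at
 * completed_index, invoking buf_service on each one.  skipped is nonzero for
 * every buffer except the one that actually completed.
 */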
static inline void vnic_rq_service(struct vnic_rq *rq,
        struct cq_desc *cq_desc, u16 completed_index,
        int desc_return, void (*buf_service)(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque), void *opaque)
{
        struct vnic_rq_buf *buf;
        int skipped;

        buf = rq->to_clean;
        while (1) {

                skipped = (buf->index != completed_index);

                (*buf_service)(rq, cq_desc, buf, skipped, opaque);

                if (desc_return == VNIC_RQ_RETURN_DESC)
                        rq->ring.desc_avail++;

                rq->to_clean = buf->next;

                if (!skipped)
                        break;

                buf = rq->to_clean;
        }
}

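/* Repeatedly invoke buf_fill to allocate and post receive buffers until the
 * ring has no free descriptors left or the callback fails.
 */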
static inline int vnic_rq_fill(struct vnic_rq *rq,
        int (*buf_fill)(struct vnic_rq *rq))
{
        int err;

        while (vnic_rq_desc_avail(rq) > 0) {

                err = (*buf_fill)(rq);
                if (err)
                        return err;
        }

        return 0;
}
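
/* A minimal sketch of a buf_fill callback, not part of this API: the names
 * my_rq_alloc_buf, netdev, dev and len below are hypothetical and assumed to
 * be in scope.  Allocate an OS buffer, map it for DMA, fill the descriptor
 * obtained from vnic_rq_next_desc(), then post it:
 *
 *      static int my_rq_alloc_buf(struct vnic_rq *rq)
 *      {
 *              struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
 *              dma_addr_t dma_addr;
 *
 *              if (!skb)
 *                      return -ENOMEM;
 *
 *              dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
 *              if (dma_mapping_error(dev, dma_addr)) {
 *                      dev_kfree_skb(skb);
 *                      return -ENOMEM;
 *              }
 *
 *              // Driver-specific: write dma_addr and len into the descriptor
 *              // returned by vnic_rq_next_desc(rq), then post the buffer.
 *              vnic_rq_post(rq, skb, 0, dma_addr, len, 0);
 *
 *              return 0;
 *      }
 */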

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
        void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));

#endif /* _VNIC_RQ_H_ */