/******************************************************************************
 * Shared producer-consumer ring macros.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))
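
/*
 * Illustrative example (not part of the interface): __RD32() keeps only the
 * highest set bit of its argument, e.g. __RD32(3000) evaluates to 2048 while
 * __RD32(4096) stays 4096, so an entry count is always rounded down to a
 * power of two.
 */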

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
            sizeof(((struct _s##_sring *)0)->ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
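
/*
 * Worked example (illustrative only): with a 4096-byte shared page, a ring
 * header occupying 64 bytes before the 'ring' array (as in the layout defined
 * below), and a hypothetical 112-byte request/response union,
 * (4096 - 64) / 112 = 36 entries would fit, which __RD32() rounds down to a
 * 32-entry ring.
 */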

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say request_t, and response_t already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, request_t, response_t);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     mytag_sring_t      - The shared ring.
 *     mytag_front_ring_t - The 'front' half of the ring.
 *     mytag_back_ring_t  - The 'back' half of the ring.
 *
 * To initialise a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialise
 * the front half:
 *
 *     mytag_front_ring_t front_ring;
 *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 *
 * Initialising the back follows similarly (note that only the front
 * initialises the shared ring):
 *
 *     mytag_back_ring_t back_ring;
 *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
                                                                         \
/* Shared ring entry */                                                  \
union __name##_sring_entry {                                             \
    __req_t req;                                                          \
    __rsp_t rsp;                                                          \
};                                                                        \
                                                                         \
/* Shared ring page */                                                   \
struct __name##_sring {                                                   \
    RING_IDX req_prod, req_event;                                         \
    RING_IDX rsp_prod, rsp_event;                                         \
    uint8_t __pad[48]; /* pads the header to 64 bytes; cleared at init */ \
    union __name##_sring_entry ring[1]; /* variable-length */             \
};                                                                        \
                                                                         \
/* "Front" end's private variables */                                    \
struct __name##_front_ring {                                              \
    RING_IDX req_prod_pvt;                                                \
    RING_IDX rsp_cons;                                                    \
    unsigned int nr_ents;                                                 \
    struct __name##_sring *sring;                                         \
};                                                                        \
                                                                         \
/* "Back" end's private variables */                                     \
struct __name##_back_ring {                                               \
    RING_IDX rsp_prod_pvt;                                                \
    RING_IDX req_cons;                                                    \
    unsigned int nr_ents;                                                 \
    struct __name##_sring *sring;                                         \
};                                                                        \
                                                                         \
typedef struct __name##_sring __name##_sring_t;                           \
typedef struct __name##_front_ring __name##_front_ring_t;                 \
typedef struct __name##_back_ring __name##_back_ring_t

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE()-1
 * outstanding requests.
 */
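
/*
 * Illustrative front-end sketch ('myreq_t' and the request fields are
 * placeholders, not part of this header):
 *
 *     if (!RING_FULL(&front_ring)) {
 *         myreq_t *req = RING_GET_REQUEST(&front_ring,
 *                                         front_ring.req_prod_pvt++);
 *         ... fill in *req ...
 *     }
 *
 * Pushing the queued requests to the shared ring, and deciding whether the
 * back end needs a notification, is shown with the notification macros below.
 */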

/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do {                                        \
    (_s)->req_prod  = (_s)->rsp_prod  = 0;                               \
    (_s)->req_event = (_s)->rsp_event = 1;                               \
    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad));                   \
} while(0)

#define FRONT_RING_ATTACH(_r, _s, _i, __size) do {                       \
    (_r)->req_prod_pvt = (_i);                                            \
    (_r)->rsp_cons = (_i);                                                \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                              \
    (_r)->sring = (_s);                                                   \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)

#define BACK_RING_ATTACH(_r, _s, _i, __size) do {                        \
    (_r)->rsp_prod_pvt = (_i);                                            \
    (_r)->req_cons = (_i);                                                \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                              \
    (_r)->sring = (_s);                                                   \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)

/* How big is this ring? */
#define RING_SIZE(_r)                                                    \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)                                           \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)                                                    \
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)                                \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

/*
 * Like above, but additionally capped by the number of slots still available
 * for responses, so a front end that advances req_prod too far cannot make
 * the count exceed the ring size.
 */
#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({                              \
    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;           \
    unsigned int rsp = RING_SIZE(_r) -                                   \
        ((_r)->req_cons - (_r)->rsp_prod_pvt);                           \
    req < rsp ? req : rsp;                                               \
})

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)                                       \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx)                                      \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/*
 * Get a local copy of a request/response.
 *
 * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where dest is a struct which consists of only bitfields.
 */
#define RING_COPY_(type, r, idx, dest) do {                              \
    /* Use volatile to force the copy into dest. */                      \
    *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx);           \
} while (0)

#define RING_COPY_REQUEST(r, idx, req)  RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
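
/*
 * Illustrative back-end usage (the request type and field handling are
 * placeholders):
 *
 *     myreq_t req;
 *     RING_COPY_REQUEST(&back_ring, back_ring.req_cons, &req);
 *     back_ring.req_cons++;
 *     ... validate and process the private copy 'req'; the front end can no
 *     longer modify it ...
 */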

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                            \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)                            \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

/* Ill-behaved backend determination: Can there be this many responses? */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod)                           \
    (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))

#define RING_PUSH_REQUESTS(_r) do {                                      \
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = (_r)->req_prod_pvt;                          \
} while (0)

#define RING_PUSH_RESPONSES(_r) do {                                     \
    virt_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                          \
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 *  Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 *  is a boolean return value. True indicates that the receiver requires an
 *  asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 *  Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 *  The second argument is a boolean return value. True indicates that there
 *  are pending messages on the ring (i.e., the connection should not be put
 *  to sleep).
 *
 *  These macros will set the req_event/rsp_event field to trigger a
 *  notification on the very next message that is enqueued. If you want to
 *  create batches of work (i.e., only receive a notification after several
 *  messages have been enqueued) then you will need to create a customised
 *  version of the FINAL_CHECK macro in your own code, which sets the event
 *  field appropriately.
 */
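
/*
 * Illustrative producer sketch (the notification call is a placeholder for
 * whatever mechanism, e.g. an event channel, the ring's users agree on):
 *
 *     int notify;
 *     ... queue requests as above, advancing req_prod_pvt ...
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *     if (notify)
 *         notify_remote_via_evtchn(evtchn);
 */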

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {            \
    RING_IDX __old = (_r)->sring->req_prod;                              \
    RING_IDX __new = (_r)->req_prod_pvt;                                 \
    virt_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = __new;                                       \
    virt_mb(); /* back sees new requests /before/ we check req_event */  \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <            \
                 (RING_IDX)(__new - __old));                             \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {           \
    RING_IDX __old = (_r)->sring->rsp_prod;                              \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                 \
    virt_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = __new;                                       \
    virt_mb(); /* front sees new resps /before/ we check rsp_event */    \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <            \
                 (RING_IDX)(__new - __old));                             \
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {              \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                    \
    if (_work_to_do) break;                                              \
    (_r)->sring->req_event = (_r)->req_cons + 1;                         \
    virt_mb(); /* front sees new req_event /before/ we re-check */       \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                    \
} while (0)
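
/*
 * Illustrative back-end consumer loop (scheduling and wakeup details are
 * placeholders, not defined by this header):
 *
 *     int work_to_do;
 *     do {
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring)) {
 *             ... copy out and handle the request at back_ring.req_cons ...
 *             back_ring.req_cons++;
 *         }
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, work_to_do);
 *     } while (work_to_do);
 *     ... safe to go idle until the next notification ...
 */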

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {             \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                   \
    if (_work_to_do) break;                                              \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                         \
    virt_mb(); /* back sees new rsp_event /before/ we re-check */        \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                   \
} while (0)

#endif /* __XEN_PUBLIC_IO_RING_H__ */