1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
3 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
10 /* implements a simple circular buffer that can optionally be
11 * shared between user space and the kernel and can be resized
13 * the requested element size is rounded up to a power of 2
14 * and the number of elements in the buffer is also rounded
15 * up to a power of 2. Since the queue is empty when the
16 * producer and consumer indices match the maximum capacity
17 * of the queue is one less than the number of element slots
20 /* this data structure is shared between user space and kernel
21 * space for those cases where the queue is shared. It contains
22 * the producer and consumer indices. It also contains a copy
23 * of the queue size parameters for user space to use but the
24 * kernel must use the parameters in the rxe_queue struct
25 * this MUST MATCH the corresponding librxe struct
26 * for performance reasons arrange to have producer and consumer
27 * pointers in separate cache lines
28 * the kernel should always mask the indices to avoid accessing
29 * memory outside of the data area
31 struct rxe_queue_buf {
44 struct rxe_queue_buf *buf;
45 struct rxe_mmap_info *ip;
48 unsigned int log2_elem_size;
49 unsigned int index_mask;
/*
 * Build the mmap info used to share a queue buffer with user space and
 * copy the resulting mminfo to @outbuf.  NOTE(review): returns int, so
 * presumably 0 on success / negative errno — body not visible here.
 */
52 int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
53 struct ib_udata *udata, struct rxe_queue_buf *buf,
54 size_t buf_size, struct rxe_mmap_info **ip_p);
/* return @q to its empty state */
56 void rxe_queue_reset(struct rxe_queue *q);
/*
 * Allocate and initialize a queue; per the header comment, element size
 * and count are rounded up to powers of two.  (A parameter line between
 * these two declarations is not visible in this view.)
 */
58 struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe,
60 unsigned int elem_size);
/*
 * Resize @q; *num_elem_p is an in/out element count.  Callers pass the
 * locks that protect producers and consumers while the buffer is
 * swapped (see the inline comments below).  NOTE(review): exact return
 * convention not visible here — presumably 0 / negative errno.
 */
62 int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
63 unsigned int elem_size, struct ib_udata *udata,
64 struct mminfo __user *outbuf,
65 /* Protect producers while resizing queue */
66 spinlock_t *producer_lock,
67 /* Protect consumers while resizing queue */
68 spinlock_t *consumer_lock);
/* release @queue and the resources it owns */
70 void rxe_queue_cleanup(struct rxe_queue *queue);
72 static inline int next_index(struct rxe_queue *q, int index)
74 return (index + 1) & q->buf->index_mask;
77 static inline int queue_empty(struct rxe_queue *q)
79 return ((q->buf->producer_index - q->buf->consumer_index)
80 & q->index_mask) == 0;
83 static inline int queue_full(struct rxe_queue *q)
85 return ((q->buf->producer_index + 1 - q->buf->consumer_index)
86 & q->index_mask) == 0;
89 static inline void advance_producer(struct rxe_queue *q)
91 q->buf->producer_index = (q->buf->producer_index + 1)
95 static inline void advance_consumer(struct rxe_queue *q)
97 q->buf->consumer_index = (q->buf->consumer_index + 1)
101 static inline void *producer_addr(struct rxe_queue *q)
103 return q->buf->data + ((q->buf->producer_index & q->index_mask)
104 << q->log2_elem_size);
107 static inline void *consumer_addr(struct rxe_queue *q)
109 return q->buf->data + ((q->buf->consumer_index & q->index_mask)
110 << q->log2_elem_size);
113 static inline unsigned int producer_index(struct rxe_queue *q)
115 return q->buf->producer_index;
118 static inline unsigned int consumer_index(struct rxe_queue *q)
120 return q->buf->consumer_index;
123 static inline void *addr_from_index(struct rxe_queue *q, unsigned int index)
125 return q->buf->data + ((index & q->index_mask)
126 << q->buf->log2_elem_size);
129 static inline unsigned int index_from_addr(const struct rxe_queue *q,
132 return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
136 static inline unsigned int queue_count(const struct rxe_queue *q)
138 return (q->buf->producer_index - q->buf->consumer_index)
/* Element at the consumer end of the queue, or NULL if it is empty. */
static inline void *queue_head(struct rxe_queue *q)
{
	if (queue_empty(q))
		return NULL;

	return consumer_addr(q);
}
147 #endif /* RXE_QUEUE_H */