/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_VERBS_H
#define RXE_VERBS_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>

#include "rxe_pool.h"
#include "rxe_task.h"
#include "rxe_hw_counters.h"

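/* Two pkeys match when their low 15 bits (the base key) are equal and
 * non-zero, and at least one of the two has the full-membership bit
 * (bit 15) set; two limited members never match each other.
 */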
static inline int pkey_match(u16 key1, u16 key2)
{
	return (((key1 & 0x7fff) != 0) &&
		((key1 & 0x7fff) == (key2 & 0x7fff)) &&
		((key1 & 0x8000) || (key2 & 0x8000))) ? 1 : 0;
}

/* Return >0 if psn_a > psn_b
 * Return 0 if psn_a == psn_b
 * Return <0 if psn_a < psn_b
 */
static inline int psn_compare(u32 psn_a, u32 psn_b)
{
	s32 diff;

	diff = (psn_a - psn_b) << 8;
	return diff;
}

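/* PSNs are 24 bits wide, so the difference is shifted into the top
 * 24 bits of a signed 32-bit value and the sign then gives the right
 * ordering across wraparound. For example, psn_compare(0x000001,
 * 0xffffff) computes ((u32)(0x000001 - 0xffffff)) << 8 = 0x200 > 0,
 * correctly placing PSN 1 two packets after PSN 0xffffff.
 */
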
struct rxe_ucontext {
	struct ib_ucontext ibuc;
	struct rxe_pool_elem elem;
};

struct rxe_pd {
	struct ib_pd ibpd;
	struct rxe_pool_elem elem;
};

struct rxe_ah {
	struct ib_ah ibah;
	struct rxe_pool_elem elem;
	struct rxe_av av;
	bool is_user;
	int ah_num;
};

struct rxe_cqe {
	union {
		struct ib_wc ibwc;
		struct ib_uverbs_wc uibwc;
	};
};

struct rxe_cq {
	struct ib_cq ibcq;
	struct rxe_pool_elem elem;
	struct rxe_queue *queue;
	spinlock_t cq_lock;
	u8 notify;
	bool is_user;
};

struct rxe_sq {
	int max_wr;
	int max_sge;
	int max_inline;
	spinlock_t sq_lock; /* guard queue */
	struct rxe_queue *queue;
};

struct rxe_rq {
	int max_wr;
	int max_sge;
	spinlock_t producer_lock; /* guard queue producer */
	spinlock_t consumer_lock; /* guard queue consumer */
	struct rxe_queue *queue;
};

struct rxe_srq {
	struct ib_srq ibsrq;
	struct rxe_pool_elem elem;
	struct rxe_pd *pd;
	struct rxe_rq rq;
	u32 srq_num;
	int limit;
	int error;
};

struct rxe_req_info {
	int wqe_index;
	u32 psn;
	int opcode;
	atomic_t rd_atomic;
	int wait_fence;
	int need_rd_atomic;
	int wait_psn;
	int need_retry;
	int wait_for_rnr_timer;
	int noack_pkts;
	struct rxe_task task;
};

struct rxe_comp_info {
	u32 psn;
	int opcode;
	int timeout;
	int timeout_retry;
	int started_retry;
	u32 retry_cnt;
	u32 rnr_retry;
	struct rxe_task task;
};

enum rdatm_res_state {
	rdatm_res_state_next,
	rdatm_res_state_new,
	rdatm_res_state_replay,
};

struct resp_res {
	int type;
	int replay;
	u32 first_psn;
	u32 last_psn;
	u32 cur_psn;
	enum rdatm_res_state state;
};

struct rxe_resp_info {
	u32 msn;
	u32 psn;
	int opcode;
	enum ib_wc_status status;
	u8 aeth_syndrome;

	/* Receive only */
	struct rxe_recv_wqe *wqe;

	/* RDMA read / atomic only */
	u64 va;
	u64 offset;
	struct rxe_mr *mr;
	struct rxe_mw *mw;
	u32 resid;
	u32 rkey;
	u32 length;

	/* SRQ only */
	struct {
		struct rxe_recv_wqe wqe;
		struct ib_sge sge[RXE_MAX_SGE];
	} srq_wqe;

	/* Responder resources. It's a circular list where the oldest
	 * resource is dropped first.
	 */
	struct resp_res *resources;
	unsigned int res_head;
	unsigned int res_tail;
	struct resp_res *res;
	struct rxe_task task;
};

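/* Illustrative only, not part of the driver: claiming the next slot in
 * the circular responder-resource list might look roughly like this,
 * where max_rsc is a hypothetical parameter standing for the allocated
 * size of the resources array.
 */
static inline struct resp_res *rxe_next_res_slot(struct rxe_resp_info *resp,
						 unsigned int max_rsc)
{
	struct resp_res *res = &resp->resources[resp->res_head];

	/* reusing the slot at res_head silently drops the oldest entry */
	if (++resp->res_head == max_rsc)
		resp->res_head = 0;

	return res;
}
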
struct rxe_qp {
	struct ib_qp ibqp;
	struct rxe_pool_elem elem;
	struct ib_qp_attr attr;
	unsigned int valid;
	unsigned int mtu;
	bool is_user;

	struct rxe_pd *pd;
	struct rxe_srq *srq;
	struct rxe_cq *scq;
	struct rxe_cq *rcq;

	enum ib_sig_type sq_sig_type;

	struct rxe_sq sq;
	struct rxe_rq rq;

	struct rxe_av pri_av;
	struct rxe_av alt_av;

	struct sk_buff_head req_pkts;
	struct sk_buff_head resp_pkts;

	struct rxe_req_info req;
	struct rxe_comp_info comp;
	struct rxe_resp_info resp;

	/* Timer for retransmitting packets when ACKs have been lost. RC
	 * only. The requester sets it when it is not already
	 * started. The responder resets it whenever an ack is
	 * received.
	 */
	struct timer_list retrans_timer;
	u64 qp_timeout_jiffies;

	/* Timer for handling RNR NAKs. */
	struct timer_list rnr_nak_timer;

	spinlock_t state_lock; /* guard requester and completer */

	struct execute_work cleanup_work;
};

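/* Illustrative only: the requester arms the retransmit timer along these
 * lines (mod_timer() and timer_pending() are the standard kernel timer
 * APIs; the surrounding logic is a sketch, not the driver's exact code).
 */
static inline void rxe_arm_retrans_timer(struct rxe_qp *qp)
{
	if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
		mod_timer(&qp->retrans_timer,
			  jiffies + qp->qp_timeout_jiffies);
}
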
enum {
	RXE_ACCESS_REMOTE = IB_ACCESS_REMOTE_READ
			  | IB_ACCESS_REMOTE_WRITE
			  | IB_ACCESS_REMOTE_ATOMIC,

	RXE_ACCESS_SUPPORTED_MR = RXE_ACCESS_REMOTE
				| IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_MW_BIND
				| IB_ACCESS_ON_DEMAND
				| IB_ACCESS_FLUSH_GLOBAL
				| IB_ACCESS_FLUSH_PERSISTENT
				| IB_ACCESS_OPTIONAL,

	RXE_ACCESS_SUPPORTED_QP = RXE_ACCESS_SUPPORTED_MR,

	RXE_ACCESS_SUPPORTED_MW = RXE_ACCESS_SUPPORTED_MR
				| IB_ZERO_BASED,
};

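/* Illustrative only: callers can reject unsupported access flags by
 * masking against the table above, e.g.
 *
 *	if (access & ~RXE_ACCESS_SUPPORTED_MR)
 *		return -EOPNOTSUPP;
 */
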
enum rxe_mr_state {
	RXE_MR_STATE_INVALID,
	RXE_MR_STATE_FREE,
	RXE_MR_STATE_VALID,
};

enum rxe_mr_copy_dir {
	RXE_TO_MR_OBJ,
	RXE_FROM_MR_OBJ,
};

enum rxe_mr_lookup_type {
	RXE_LOOKUP_LOCAL,
	RXE_LOOKUP_REMOTE,
};

enum {
	RXE_MR_REREG_SUPPORTED = IB_MR_REREG_PD
			       | IB_MR_REREG_ACCESS,
};

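/* The upper 24 bits of an rkey hold the object's pool index and the low
 * 8 bits hold the key byte; MRs and MWs draw their indices from disjoint
 * ranges, so the index alone distinguishes the two.
 */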
static inline int rkey_is_mw(u32 rkey)
{
	u32 index = rkey >> 8;

	return (index >= RXE_MIN_MW_INDEX) && (index <= RXE_MAX_MW_INDEX);
}

struct rxe_mr {
	struct rxe_pool_elem elem;
	struct ib_mr ibmr;

	struct ib_umem *umem;

	u32 lkey;
	u32 rkey;
	enum rxe_mr_state state;
	int access;

	unsigned int page_offset;
	unsigned int page_shift;
	u64 page_mask;

	struct xarray page_list;
};

static inline unsigned int mr_page_size(struct rxe_mr *mr)
{
	return mr ? mr->ibmr.page_size : PAGE_SIZE;
}

enum rxe_mw_state {
	RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
	RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
	RXE_MW_STATE_VALID = RXE_MR_STATE_VALID,
};

struct rxe_mw {
	struct ib_mw ibmw;
	struct rxe_pool_elem elem;
	spinlock_t lock;
	enum rxe_mw_state state;
	struct rxe_qp *qp; /* Type 2 only */
	struct rxe_mr *mr;
	u32 rkey;
	int access;
	u64 addr;
	u64 length;
};

struct rxe_mcg {
	struct rb_node node;
	struct kref ref_cnt;
	struct rxe_dev *rxe;
	struct list_head qp_list;
	union ib_gid mgid;
	atomic_t qp_num;
	u32 qkey;
	u16 pkey;
};

struct rxe_mca {
	struct list_head qp_list;
	struct rxe_qp *qp;
};

struct rxe_port {
	struct ib_port_attr attr;
	__be64 port_guid;
	__be64 subnet_prefix;
	spinlock_t port_lock; /* guard port */
	unsigned int mtu_cap;
	/* special QPs */
	u32 qp_gsi_index;
};

struct rxe_dev {
	struct ib_device ib_dev;
	struct ib_device_attr attr;
	int max_ucontext;
	int max_inline_data;
	struct mutex usdev_lock;

	struct net_device *ndev;

	struct rxe_pool uc_pool;
	struct rxe_pool pd_pool;
	struct rxe_pool ah_pool;
	struct rxe_pool srq_pool;
	struct rxe_pool qp_pool;
	struct rxe_pool cq_pool;
	struct rxe_pool mr_pool;
	struct rxe_pool mw_pool;

	/* multicast support */
	spinlock_t mcg_lock;
	struct rb_root mcg_tree;

	spinlock_t pending_lock; /* guard pending_mmaps */
	struct list_head pending_mmaps;

	spinlock_t mmap_offset_lock; /* guard mmap_offset */
	u64 mmap_offset;

	atomic64_t stats_counters[RXE_NUM_OF_COUNTERS];

	struct rxe_port port;
	struct crypto_shash *tfm;
};

static inline void rxe_counter_inc(struct rxe_dev *rxe, enum rxe_counters index)
{
	atomic64_inc(&rxe->stats_counters[index]);
}

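/* Example use, assuming RXE_CNT_SENT_PKTS is among the counters declared
 * in rxe_hw_counters.h:
 *
 *	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
 */
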
static inline struct rxe_dev *to_rdev(struct ib_device *dev)
{
	return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
}

static inline struct rxe_ucontext *to_ruc(struct ib_ucontext *uc)
{
	return uc ? container_of(uc, struct rxe_ucontext, ibuc) : NULL;
}

static inline struct rxe_pd *to_rpd(struct ib_pd *pd)
{
	return pd ? container_of(pd, struct rxe_pd, ibpd) : NULL;
}

static inline struct rxe_ah *to_rah(struct ib_ah *ah)
{
	return ah ? container_of(ah, struct rxe_ah, ibah) : NULL;
}

static inline struct rxe_srq *to_rsrq(struct ib_srq *srq)
{
	return srq ? container_of(srq, struct rxe_srq, ibsrq) : NULL;
}

static inline struct rxe_qp *to_rqp(struct ib_qp *qp)
{
	return qp ? container_of(qp, struct rxe_qp, ibqp) : NULL;
}

static inline struct rxe_cq *to_rcq(struct ib_cq *cq)
{
	return cq ? container_of(cq, struct rxe_cq, ibcq) : NULL;
}

static inline struct rxe_mr *to_rmr(struct ib_mr *mr)
{
	return mr ? container_of(mr, struct rxe_mr, ibmr) : NULL;
}

static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
{
	return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}

static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
	return to_rpd(ah->ibah.pd);
}

static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
	return to_rpd(mr->ibmr.pd);
}

static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
{
	return to_rpd(mw->ibmw.pd);
}

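/* Illustrative only: a verbs callback typically recovers the driver
 * objects from the core ib_* structures handed in by the RDMA core,
 * e.g. for a struct ib_qp *ibqp argument:
 *
 *	struct rxe_qp *qp = to_rqp(ibqp);
 *	struct rxe_dev *rxe = to_rdev(ibqp->device);
 */
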
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);

#endif /* RXE_VERBS_H */