/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */
enum {
	/*
	 * Size threshold (bytes) below which UMR payloads are written
	 * inline into the SQ WQE — presumably; confirm against wr.c.
	 * NOTE(review): the enclosing enum braces are not visible in
	 * this chunk; reconstructed per kernel convention.
	 */
	MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
};
15 struct mlx5_wqe_eth_pad {
/* get_sq_edge - Get the next nearby edge.
 *
 * An 'edge' is defined as the first following address after the end
 * of the fragment or the SQ. Accordingly, during the WQE construction
 * which repetitively increases the pointer to write the next data, it
 * simply should check if it gets to an edge.
 *
 * @sq - SQ buffer.
 * @idx - Stride index in the SQ buffer.
 *
 * Return:
 *	The new edge.
 */
33 static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
37 fragment_end = mlx5_frag_buf_get_wqe
39 mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
41 return fragment_end + MLX5_SEND_WQE_BB;
/* Post a chain of send/receive work requests to the QP.  On failure the
 * first offending WR is returned through @bad_wr.
 * NOTE(review): @drain appears to permit posting while the QP is being
 * drained (error state) — confirm against the definitions in wr.c.
 */
int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		      const struct ib_send_wr **bad_wr, bool drain);
int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr, bool drain);
49 static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
50 const struct ib_send_wr *wr,
51 const struct ib_send_wr **bad_wr)
53 return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
56 static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
57 const struct ib_send_wr *wr,
58 const struct ib_send_wr **bad_wr)
60 return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
63 static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
64 const struct ib_recv_wr *wr,
65 const struct ib_recv_wr **bad_wr)
67 return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
70 static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
71 const struct ib_recv_wr *wr,
72 const struct ib_recv_wr **bad_wr)
74 return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
76 #endif /* _MLX5_IB_WR_H */