/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H

#include <rdma/ib_verbs.h>

#define DRV_NAME "hns_roce"

#define PCI_REVISION_ID_HIP08 0x21
#define PCI_REVISION_ID_HIP09 0x30

#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
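/*
 * Note (added): HNS_ROCE_HW_VER1 packs the ASCII string "hi06" into a
 * 32-bit value, with 'h' in the most significant byte and '6' in the
 * least significant byte.
 */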
#define HNS_ROCE_MAX_MSG_LEN 0x80000000

#define HNS_ROCE_IB_MIN_SQ_STRIDE 6

#define HNS_ROCE_BA_SIZE (32 * 4096)

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
#define HNS_ROCE_MIN_WQE_NUM 0x20

/* Hardware specification only for v1 engine */
#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000

#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
	(5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
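/*
 * Worked example (added): the CQ-free path polls every
 * HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS (20 ms) and gives up after
 * HNS_ROCE_MAX_FREE_CQ_WAIT_CNT (5000 / 20 = 250) polls, i.e. an
 * overall timeout of five seconds.
 */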
#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
#define HNS_ROCE_MIN_CQE_CNT 16

#define HNS_ROCE_MAX_IRQ_NUM 128

#define HNS_ROCE_SGE_IN_WQE 2
#define HNS_ROCE_SGE_SHIFT 4

#define HNS_ROCE_CEQ 0
#define HNS_ROCE_AEQ 1

#define HNS_ROCE_CEQE_SIZE 0x4
#define HNS_ROCE_AEQE_SIZE 0x10

#define HNS_ROCE_V3_EQE_SIZE 0x40

#define HNS_ROCE_V2_CQE_SIZE 32
#define HNS_ROCE_V3_CQE_SIZE 64

#define HNS_ROCE_V2_QPC_SZ 256
#define HNS_ROCE_V3_QPC_SZ 512

#define HNS_ROCE_MAX_PORTS 6
#define HNS_ROCE_GID_SIZE 16
#define HNS_ROCE_SGE_SIZE 16

#define HNS_ROCE_HOP_NUM_0 0xff

#define BITMAP_NO_RR 0

#define MR_TYPE_MR 0x00
#define MR_TYPE_FRMR 0x01
#define MR_TYPE_DMA 0x03

#define HNS_ROCE_FRMR_MAX_PA 512

#define PKEY_ID 0xffff

#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000

/* Offset reported to the HW when PAGE_SIZE is larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
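/*
 * Worked example (added): with 64KB kernel pages PAGE_SHIFT is 16, so
 * PG_SHIFT_OFFSET is 4 and the page size is reported to the hardware as
 * 4KB * 2^4 = 64KB. With the common 4KB pages the offset is 0.
 */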
#define PAGES_SHIFT_8 8
#define PAGES_SHIFT_16 16
#define PAGES_SHIFT_24 24
#define PAGES_SHIFT_32 32

#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230

/* The chip calculates the consumer index against twice the actual
 * EQ depth.
 */
#define EQ_DEPTH_COEFF 2
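/*
 * Illustrative helpers (added, not part of the driver): one reading of
 * the doubled-depth scheme is that the consumer index runs over twice
 * the EQ depth, so its low bits give the ring position while the next
 * bit flips once per pass over the ring, from which an owner/valid
 * indication can be derived. Assumes the entry count is a power of two;
 * the function names are hypothetical.
 */
static inline u32 hns_roce_example_eq_ring_idx(u32 cons_index, u32 entries)
{
	return cons_index & (entries - 1);	/* position within the ring */
}

static inline u32 hns_roce_example_eq_pass(u32 cons_index, u32 entries)
{
	return !!(cons_index & entries);	/* alternates every full lap */
}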
enum hns_roce_qp_cap_flags {
	HNS_ROCE_QP_CAP_RQ_RECORD_DB = BIT(0),
	HNS_ROCE_QP_CAP_SQ_RECORD_DB = BIT(1),
};

enum hns_roce_cq_flags {
	HNS_ROCE_CQ_FLAG_RECORD_DB = BIT(0),
};

enum hns_roce_qp_state {
	HNS_ROCE_QP_STATE_RST,
	HNS_ROCE_QP_STATE_INIT,
	HNS_ROCE_QP_STATE_RTR,
	HNS_ROCE_QP_STATE_RTS,
	HNS_ROCE_QP_STATE_SQD,
	HNS_ROCE_QP_STATE_ERR,
	HNS_ROCE_QP_NUM_STATE,
};

enum hns_roce_event {
	HNS_ROCE_EVENT_TYPE_PATH_MIG = 0x01,
	HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED = 0x02,
	HNS_ROCE_EVENT_TYPE_COMM_EST = 0x03,
	HNS_ROCE_EVENT_TYPE_SQ_DRAINED = 0x04,
	HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
	HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR = 0x06,
	HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR = 0x07,
	HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH = 0x08,
	HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH = 0x09,
	HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR = 0x0a,
	HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR = 0x0b,
	HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW = 0x0c,
	HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID = 0x0d,
	HNS_ROCE_EVENT_TYPE_PORT_CHANGE = 0x0f,
	/* 0x10 and 0x11 are unused in the current application */
	HNS_ROCE_EVENT_TYPE_DB_OVERFLOW = 0x12,
	HNS_ROCE_EVENT_TYPE_MB = 0x13,
	HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW = 0x14,
	HNS_ROCE_EVENT_TYPE_FLR = 0x15,
};
/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
enum {
	HNS_ROCE_LWQCE_QPC_ERROR = 1,
	HNS_ROCE_LWQCE_MTU_ERROR = 2,
	HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR = 3,
	HNS_ROCE_LWQCE_WQE_ADDR_ERROR = 4,
	HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR = 5,
	HNS_ROCE_LWQCE_SL_ERROR = 6,
	HNS_ROCE_LWQCE_PORT_ERROR = 7,
};

/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
enum {
	HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
	HNS_ROCE_LAVWQE_LENGTH_ERROR = 2,
	HNS_ROCE_LAVWQE_VA_ERROR = 3,
	HNS_ROCE_LAVWQE_PD_ERROR = 4,
	HNS_ROCE_LAVWQE_RW_ACC_ERROR = 5,
	HNS_ROCE_LAVWQE_KEY_STATE_ERROR = 6,
	HNS_ROCE_LAVWQE_MR_OPERATION_ERROR = 7,
};

/* DOORBELL overflow subtype */
enum {
	HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF = 2,
	HNS_ROCE_DB_SUBTYPE_ODB_OVF = 3,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF = 4,
	HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP = 5,
	HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP = 6,
};

enum {
	/* RQ&SRQ related operations */
	HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
	HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
};
#define HNS_ROCE_CAP_FLAGS_EX_SHIFT 12

enum hns_roce_cap_flags {
	HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
	HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
	HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
	HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
	HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
	HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
	HNS_ROCE_CAP_FLAG_MW = BIT(7),
	HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
	HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
#define HNS_ROCE_DB_UNIT_SIZE 4

enum {
	HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
};

enum hns_roce_reset_stage {
	HNS_ROCE_STATE_NON_RST,
	HNS_ROCE_STATE_RST_BEF_DOWN,
	HNS_ROCE_STATE_RST_DOWN,
	HNS_ROCE_STATE_RST_UNINIT,
	HNS_ROCE_STATE_RST_INIT,
	HNS_ROCE_STATE_RST_INITED,
};

enum hns_roce_instance_state {
	HNS_ROCE_STATE_NON_INIT,
	HNS_ROCE_STATE_INITED,
	HNS_ROCE_STATE_UNINIT,
};

enum {
	HNS_ROCE_RST_DIRECT_RETURN = 0,
};

#define HNS_ROCE_CMD_SUCCESS 1

#define HNS_ROCE_PORT_DOWN 0
#define HNS_ROCE_PORT_UP 1

/* The minimum page size supported by the hardware is 4K */
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)

/* The minimum page count for the hardware to access a page directly */
#define HNS_HW_DIRECT_PAGE_COUNT 2
struct hns_roce_uar {
	unsigned long logic_idx;
};

struct hns_roce_ucontext {
	struct ib_ucontext ibucontext;
	struct hns_roce_uar uar;
	struct list_head page_list;
	struct mutex page_mutex;
};

struct hns_roce_bitmap {
	/* bitmap traversal resumes from the last bit found to be 1 */
	unsigned long reserved_top;
	unsigned long *table;
};

/* For Hardware Entry Memory */
struct hns_roce_hem_table {
	/* HEM type: 0 = qpc, 1 = mtt, 2 = cqc, 3 = srq, 4 = other */
	/* HEM array element num */
	unsigned long num_hem;
	/* Total number of objects recorded in the HEM entries */
	unsigned long num_obj;
	/* Single obj size */
	unsigned long obj_size;
	unsigned long table_chunk_size;
	struct hns_roce_hem **hem;
	dma_addr_t *bt_l1_dma_addr;
	dma_addr_t *bt_l0_dma_addr;
};

struct hns_roce_buf_region {
	int offset; /* page offset */
	u32 count; /* page count */
	int hopnum; /* addressing hop num */
};

#define HNS_ROCE_MAX_BT_REGION 3
#define HNS_ROCE_MAX_BT_LEVEL 3
struct hns_roce_hem_list {
	struct list_head root_bt;
	/* link all bt dma mem by hop config */
	struct list_head mid_bt[HNS_ROCE_MAX_BT_REGION][HNS_ROCE_MAX_BT_LEVEL];
	struct list_head btm_bt; /* link all bottom bt in @mid_bt */
	dma_addr_t root_ba; /* pointer to the root ba table */
};

struct hns_roce_buf_attr {
	struct {
		size_t size; /* region size */
		int hopnum; /* multi-hop addressing hop num */
	} region[HNS_ROCE_MAX_BT_REGION];
	int region_count; /* valid region count */
	unsigned int page_shift; /* buffer page shift */
	bool fixed_page; /* whether page_shift is a fixed size or a maximum */
	int user_access; /* umem access flag */
	bool mtt_only; /* only alloc buffer-required MTT memory */
};

struct hns_roce_hem_cfg {
	dma_addr_t root_ba; /* root BA table's address */
	bool is_direct; /* addressing without BA table */
	unsigned int ba_pg_shift; /* BA table page shift */
	unsigned int buf_pg_shift; /* buffer page shift */
	unsigned int buf_pg_count; /* buffer page count */
	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
};

/* memory translate region */
struct hns_roce_mtr {
	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
	struct ib_umem *umem; /* user space buffer */
	struct hns_roce_buf *kmem; /* kernel space buffer */
	struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
};
struct hns_roce_mw {
	struct ib_mw ibmw;
	int enabled; /* MW's active status */
};

/* Only support 4K page size for mr register */
struct hns_roce_mr {
	struct ib_mr ibmr;
	u64 iova; /* MR's virtual original addr */
	u64 size; /* Address range of MR */
	u32 key; /* Key of MR */
	u32 pd; /* PD num of MR */
	u32 access; /* Access permission of MR */
	int enabled; /* MR's active status */
	int type; /* MR's register type */
	u32 pbl_hop_num; /* multi-hop number */
	struct hns_roce_mtr pbl_mtr;
	dma_addr_t *page_list;
};

struct hns_roce_mr_table {
	struct hns_roce_bitmap mtpt_bitmap;
	struct hns_roce_hem_table mtpt_table;
};
struct hns_roce_wq {
	u64 *wrid; /* Work request ID */
	u32 wqe_cnt; /* WQE num */
	int wqe_shift; /* WQE size */
	void __iomem *db_reg_l;
};

struct hns_roce_sge {
	unsigned int sge_cnt; /* SGE num */
	int sge_shift; /* SGE size */
};

struct hns_roce_buf_list {
	void *buf;
	dma_addr_t map;
};

struct hns_roce_buf {
	struct hns_roce_buf_list direct;
	struct hns_roce_buf_list *page_list;
	u32 size;
	unsigned int page_shift;
};

struct hns_roce_db_pgdir {
	struct list_head list;
	DECLARE_BITMAP(order0, HNS_ROCE_DB_PER_PAGE);
	DECLARE_BITMAP(order1, HNS_ROCE_DB_PER_PAGE / HNS_ROCE_DB_TYPE_COUNT);
	unsigned long *bits[HNS_ROCE_DB_TYPE_COUNT];
};

struct hns_roce_user_db_page {
	struct list_head list;
	struct ib_umem *umem;
	unsigned long user_virt;
};

struct hns_roce_db {
	union {
		struct hns_roce_db_pgdir *pgdir;
		struct hns_roce_user_db_page *user_page;
	} u;
};
struct hns_roce_cq {
	struct ib_cq ib_cq;
	struct hns_roce_mtr mtr;
	struct hns_roce_db db;
	void __iomem *cq_db_l;
	struct completion free;
	struct list_head sq_list; /* all qps on this send cq */
	struct list_head rq_list; /* all qps on this recv cq */
	int is_armed; /* cq is armed */
	struct list_head node; /* all armed cqs are on a list */
};

struct hns_roce_idx_que {
	struct hns_roce_mtr mtr;
	unsigned long *bitmap;
};

struct hns_roce_srq {
	struct ib_srq ibsrq;
	void __iomem *db_reg_l;
	struct completion free;
	struct hns_roce_mtr buf_mtr;
	struct hns_roce_idx_que idx_que;
	void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
};

struct hns_roce_uar_table {
	struct hns_roce_bitmap bitmap;
};

struct hns_roce_qp_table {
	struct hns_roce_bitmap bitmap;
	struct hns_roce_hem_table qp_table;
	struct hns_roce_hem_table irrl_table;
	struct hns_roce_hem_table trrl_table;
	struct hns_roce_hem_table sccc_table;
	struct mutex scc_mutex;
};

struct hns_roce_cq_table {
	struct hns_roce_bitmap bitmap;
	struct hns_roce_hem_table table;
};

struct hns_roce_srq_table {
	struct hns_roce_bitmap bitmap;
	struct hns_roce_hem_table table;
};

struct hns_roce_raq_table {
	struct hns_roce_buf_list *e_raq_buf;
};
struct hns_roce_av {
	u8 dgid[HNS_ROCE_GID_SIZE];
};

struct hns_roce_ah {
	struct ib_ah ibah;
	struct hns_roce_av av;
};

struct hns_roce_cmd_context {
	struct completion done;
};

struct hns_roce_cmdq {
	struct dma_pool *pool;
	struct mutex hcr_mutex;
	struct semaphore poll_sem;
	/*
	 * Event mode: serializes command slots so that no more than
	 * max_cmds commands are outstanding and users stay within
	 * their limit region.
	 */
	struct semaphore event_sem;
	spinlock_t context_lock;
	struct hns_roce_cmd_context *context;
	/*
	 * Mask derived from max_cmds, which is rounded to a
	 * power of 2.
	 */
	u16 token_mask;
	/*
	 * Whether to use event mode; initialized to a non-zero default.
	 * Once the command event queue is ready, the driver can switch
	 * to event mode; on device close it switches back to poll
	 * (non-event) mode.
	 */
	u8 use_events;
};
struct hns_roce_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
};

struct hns_roce_rinl_sge {
	void *addr;
	u32 len;
};

struct hns_roce_rinl_wqe {
	struct hns_roce_rinl_sge *sg_list;
};

struct hns_roce_rinl_buf {
	struct hns_roce_rinl_wqe *wqe_list;
};

enum {
	HNS_ROCE_FLUSH_FLAG = 0,
};
struct hns_roce_work {
	struct hns_roce_dev *hr_dev;
	struct work_struct work;
};

struct hns_roce_qp {
	struct ib_qp ibqp;
	struct hns_roce_wq rq;
	struct hns_roce_db rdb;
	struct hns_roce_db sdb;
	unsigned long en_flags;
	enum ib_sig_type sq_signal_bits;
	struct hns_roce_wq sq;
	struct hns_roce_mtr mtr;
	void (*event)(struct hns_roce_qp *qp,
		      enum hns_roce_event event_type);
	struct completion free;
	struct hns_roce_sge sge;
	enum ib_mtu path_mtu;
	/* 0: flush needed, 1: unneeded */
	unsigned long flush_flag;
	struct hns_roce_work flush_work;
	struct hns_roce_rinl_buf rq_inl_buf;
	struct list_head node; /* all qps are on a list */
	struct list_head rq_node; /* all recv qps are on a list */
	struct list_head sq_node; /* all send qps are on a list */
};
struct hns_roce_ib_iboe {
	struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
	struct notifier_block nb;
	u8 phy_port[HNS_ROCE_MAX_PORTS];
};

enum {
	HNS_ROCE_EQ_STAT_INVALID = 0,
	HNS_ROCE_EQ_STAT_VALID = 2,
};

struct hns_roce_ceqe {
	__le32 comp;
	__le32 rsv[15];
};

struct hns_roce_aeqe {
	__le32 asyn;
};

struct hns_roce_eq {
	struct hns_roce_dev *hr_dev;
	void __iomem *doorbell;
	int type_flag; /* Aeq:1 ceq:0 */
	struct hns_roce_buf_list *buf_list;
	struct hns_roce_mtr mtr;
};

struct hns_roce_eq_table {
	struct hns_roce_eq *eq;
	void __iomem **eqc_base; /* only for hw v1 */
};
struct hns_roce_caps {
	int gid_table_len[HNS_ROCE_MAX_PORTS];
	int pkey_table_len[HNS_ROCE_MAX_PORTS];
	int local_ca_ack_delay;
	int max_qp_init_rdma;
	int max_qp_dest_rdma;
	int num_comp_vectors;
	int num_other_vectors;
	int qpc_timer_entry_sz;
	int cqc_timer_entry_sz;
	u32 qpc_timer_bt_num;
	u32 cqc_timer_bt_num;
	u32 qpc_timer_ba_pg_sz;
	u32 qpc_timer_buf_pg_sz;
	u32 qpc_timer_hop_num;
	u32 cqc_timer_ba_pg_sz;
	u32 cqc_timer_buf_pg_sz;
	u32 cqc_timer_hop_num;
	u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
	u32 srqwqe_buf_pg_sz;
	u32 chunk_sz; /* chunk size in non multihop mode */
	u16 default_ceq_max_cnt;
	u16 default_ceq_period;
	u16 default_aeq_max_cnt;
	u16 default_aeq_period;
	u16 default_aeq_arm_st;
	u16 default_ceq_arm_st;
};
struct hns_roce_dfx_hw {
	int (*query_cqc_info)(struct hns_roce_dev *hr_dev, u32 cqn,
			      int *buffer);
};

enum hns_roce_device_state {
	HNS_ROCE_DEVICE_STATE_INITED,
	HNS_ROCE_DEVICE_STATE_RST_DOWN,
	HNS_ROCE_DEVICE_STATE_UNINIT,
};
struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier,
			 u16 op, u16 token, int event);
	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
	int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
		       const union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			  struct hns_roce_mr *mr, unsigned long mtpt_idx);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags, u32 pdn,
				int mr_access_flags, u64 iova, u64 size,
				void *mb_buf);
	int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
			       struct hns_roce_mr *mr);
	int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			 int attr_mask, enum ib_qp_state cur_state,
			 enum ib_qp_state new_state);
	int (*destroy_qp)(struct ib_qp *ibqp, struct ib_udata *udata);
	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
				    struct hns_roce_qp *hr_qp);
	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr);
	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
			 const struct ib_recv_wr **bad_recv_wr);
	int (*req_notify_cq)(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			struct ib_udata *udata);
	int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
	int (*init_eq)(struct hns_roce_dev *hr_dev);
	void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
	void (*write_srqc)(struct hns_roce_dev *hr_dev,
			   struct hns_roce_srq *srq, u32 pdn, u16 xrcd, u32 cqn,
			   void *mb_buf, u64 *mtts_wqe, u64 *mtts_idx,
			   dma_addr_t dma_handle_wqe,
			   dma_addr_t dma_handle_idx);
	int (*modify_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			  enum ib_srq_attr_mask srq_attr_mask,
			  struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
	int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr);
	const struct ib_device_ops *hns_roce_dev_ops;
	const struct ib_device_ops *hns_roce_dev_srq_ops;
};
struct hns_roce_dev {
	struct ib_device ib_dev;
	struct platform_device *pdev;
	struct pci_dev *pci_dev;
	struct hns_roce_uar priv_uar;
	const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t bt_cmd_lock;
	unsigned long reset_cnt;
	struct hns_roce_ib_iboe iboe;
	enum hns_roce_device_state state;
	struct list_head qp_list; /* list of all qps on this dev */
	spinlock_t qp_list_lock; /* protect qp_list */
	struct list_head pgdir_list;
	struct mutex pgdir_mutex;
	int irq[HNS_ROCE_MAX_IRQ_NUM];
	u8 __iomem *reg_base;
	struct hns_roce_caps caps;
	struct xarray qp_table_xa;
	unsigned char dev_addr[HNS_ROCE_MAX_PORTS][ETH_ALEN];
	void __iomem *priv_addr;
	struct hns_roce_cmdq cmd;
	struct hns_roce_bitmap pd_bitmap;
	struct hns_roce_uar_table uar_table;
	struct hns_roce_mr_table mr_table;
	struct hns_roce_cq_table cq_table;
	struct hns_roce_srq_table srq_table;
	struct hns_roce_qp_table qp_table;
	struct hns_roce_eq_table eq_table;
	struct hns_roce_hem_table qpc_timer_table;
	struct hns_roce_hem_table cqc_timer_table;
	dma_addr_t tptr_dma_addr; /* only for hw v1 */
	u32 tptr_size; /* only for hw v1 */
	const struct hns_roce_hw *hw;
	struct workqueue_struct *irq_workq;
	const struct hns_roce_dfx_hw *dfx;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

static inline struct hns_roce_ucontext
			*to_hr_ucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
}

static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct hns_roce_pd, ibpd);
}

static inline struct hns_roce_ah *to_hr_ah(struct ib_ah *ibah)
{
	return container_of(ibah, struct hns_roce_ah, ibah);
}

static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct hns_roce_mr, ibmr);
}

static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct hns_roce_mw, ibmw);
}

static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct hns_roce_qp, ibqp);
}

static inline struct hns_roce_cq *to_hr_cq(struct ib_cq *ib_cq)
{
	return container_of(ib_cq, struct hns_roce_cq, ib_cq);
}

static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
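/*
 * Example (added): the to_hr_*() helpers recover a driver object from the
 * ib_* structure embedded inside it, as handed over by the RDMA core:
 *
 *	int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 *	{
 *		struct hns_roce_pd *hr_pd = to_hr_pd(pd);
 *		...
 *	}
 */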
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
	__raw_writeq(*(u64 *) val, dest);
}
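/*
 * Usage sketch (added): a doorbell is composed as two little-endian
 * 32-bit words and written to the BAR in one 64-bit store, e.g.:
 *
 *	__le32 db[2] = { cpu_to_le32(low), cpu_to_le32(high) };
 *
 *	hns_roce_write64_k(db, hr_dev->reg_base + DB_REG_OFFSET);
 *
 * where "low"/"high" stand in for the hardware-specific doorbell fields.
 */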
static inline struct hns_roce_qp
	*__hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, u32 qpn)
{
	return xa_load(&hr_dev->qp_table_xa, qpn & (hr_dev->caps.num_qps - 1));
}
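/*
 * Usage sketch (added, hypothetical caller): the QPN is masked with
 * (num_qps - 1) before the xarray lookup, so num_qps must be a power of
 * two. A caller serializes against teardown with the xarray lock and is
 * responsible for keeping the QP alive before using it.
 */
static inline struct hns_roce_qp *
hns_roce_example_find_qp(struct hns_roce_dev *hr_dev, u32 qpn)
{
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	xa_unlock(&hr_dev->qp_table_xa);

	return qp;
}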
static inline bool hns_roce_buf_is_direct(struct hns_roce_buf *buf)
{
	return !buf->page_list;
}

static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
	if (hns_roce_buf_is_direct(buf))
		return (char *)(buf->direct.buf) + (offset & (buf->size - 1));

	return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
	       (offset & ((1 << buf->page_shift) - 1));
}

static inline dma_addr_t hns_roce_buf_page(struct hns_roce_buf *buf, int idx)
{
	if (hns_roce_buf_is_direct(buf))
		return buf->direct.map + ((dma_addr_t)idx << buf->page_shift);

	return buf->page_list[idx].map;
}
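/*
 * Usage sketch (added): hns_roce_buf_page() hides whether the buffer was
 * allocated as one direct chunk or as a list of pages; collecting the DMA
 * address of every page works the same way in both cases. The function
 * below is illustrative, not part of the driver.
 */
static inline void hns_roce_example_collect_pages(struct hns_roce_buf *buf,
						  dma_addr_t *pages,
						  int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		pages[i] = hns_roce_buf_page(buf, i);
}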
#define hr_hw_page_align(x) ALIGN(x, 1 << HNS_HW_PAGE_SHIFT)

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;
}

static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (count > 0)
		return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;

	return 0;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift);
}

static inline u32 to_hr_hem_entries_count(u32 count, u32 buf_shift)
{
	return hr_hw_page_align(count << buf_shift) >> buf_shift;
}

static inline u32 to_hr_hem_entries_shift(u32 count, u32 buf_shift)
{
	if (!count)
		return 0;

	return ilog2(to_hr_hem_entries_count(count, buf_shift));
}
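/*
 * Worked example (added): for a queue of 1000 entries of 64 bytes each
 * (buf_shift = 6):
 *
 *	to_hr_hem_entries_size(1000, 6)  = ALIGN(64000, 4096) = 65536
 *	to_hr_hem_entries_count(1000, 6) = 65536 >> 6         = 1024
 *	to_hr_hem_entries_shift(1000, 6) = ilog2(1024)        = 10
 *
 * i.e. the entry count is rounded up so the buffer fills whole 4KB
 * hardware pages.
 */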
#define DSCP_SHIFT 2

static inline u8 get_tclass(const struct ib_global_route *grh)
{
	return grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP ?
	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
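/*
 * Example (added): for RoCEv2 (UDP encapsulation) the 8-bit traffic class
 * carries DSCP in its upper six bits, so a traffic class of 0xb8 yields
 * DSCP 0x2e (Expedited Forwarding); for RoCEv1 the traffic class is used
 * unshifted.
 */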
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);

int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
			u64 out_param);
int hns_roce_cmd_use_events(struct hns_roce_dev *hr_dev);
void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* The hns roce hw needs the current block and the next block address
 * from the mtt.
 */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int page_shift, struct ib_udata *udata,
			unsigned long user_addr);
void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr);
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, int page_cnt);
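/*
 * Usage sketch (added, hypothetical values): a kernel-owned queue buffer
 * is typically wrapped in an mtr by filling one region of a
 * hns_roce_buf_attr and letting hns_roce_mtr_create() allocate and map
 * it; the hop number of 2 below is an assumed multi-hop configuration,
 * and the function name is illustrative.
 */
static inline int hns_roce_example_wrap_queue(struct hns_roce_dev *hr_dev,
					      struct hns_roce_mtr *mtr,
					      size_t bytes)
{
	struct hns_roce_buf_attr attr = {};

	attr.region[0].size = bytes;
	attr.region[0].hopnum = 2;	/* assumed hop number */
	attr.region_count = 1;
	attr.page_shift = HNS_HW_PAGE_SHIFT;

	/* no udata and no user address: allocate a kernel buffer */
	return hns_roce_mtr_create(hr_dev, mtr, &attr, HNS_HW_PAGE_SHIFT,
				   NULL, 0);
}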
int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_cq_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);

void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev);

int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj);
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			  int rr);
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top);
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap);
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj);
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr);
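/*
 * Usage sketch (added): resource numbers come from the bitmap allocator
 * and are returned with hns_roce_bitmap_free(); BITMAP_NO_RR opts out of
 * round-robin recycling of freed indexes. The function below is
 * illustrative, not part of the driver.
 */
static inline int hns_roce_example_use_idx(struct hns_roce_bitmap *bitmap)
{
	unsigned long obj;
	int ret;

	ret = hns_roce_bitmap_alloc(bitmap, &obj);
	if (ret)
		return ret;

	/* ... program the object into hardware ... */

	hns_roce_bitmap_free(bitmap, obj, BITMAP_NO_RR);
	return 0;
}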
int hns_roce_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		       struct ib_udata *udata);
int hns_roce_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int hns_roce_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}

int hns_roce_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata);
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags,
			   struct ib_pd *pd, struct ib_udata *udata);
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg);
int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);

int hns_roce_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int hns_roce_dealloc_mw(struct ib_mw *ibmw);
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf, u32 page_shift);

int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct hns_roce_buf *buf);
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, int start, struct ib_umem *umem,
			   unsigned int page_shift);

int hns_roce_create_srq(struct ib_srq *srq,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);

struct ib_qp *hns_roce_create_qp(struct ib_pd *ib_pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata);
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq,
		       struct hns_roce_cq *recv_cq);
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq);
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata);
__be32 send_ieth(const struct ib_send_wr *wr);
int to_hr_qp_type(int qp_type);

int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata);

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);

int hns_roce_db_map_user(struct hns_roce_ucontext *context,
			 struct ib_udata *udata, unsigned long virt,
			 struct hns_roce_db *db);
void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
			    struct hns_roce_db *db);
int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
		      int order);
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);

void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);

int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
			       struct ib_cq *ib_cq);
#endif /* _HNS_ROCE_DEVICE_H */