/*
 * Copyright (c) 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX4_IB_H
#define MLX4_IB_H

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/qp.h>
#define MLX4_IB_DRV_NAME	"mlx4_ib"

#define pr_fmt(fmt)	"<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__

#define mlx4_ib_warn(ibdev, format, arg...) \
	dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg)
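/*
 * Illustrative usage (not from this header): given a struct ib_device *ibdev,
 *
 *	mlx4_ib_warn(ibdev, "couldn't find counter index, err %d\n", err);
 *
 * logs through dev_warn() against the parent device with an "mlx4_ib: "
 * prefix, while pr_*() calls in files that include this header pick up the
 * "<mlx4_ib> <function>: " prefix from pr_fmt().
 */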
enum {
	MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
	MLX4_IB_MAX_HEADROOM	 = 2048
};

#define MLX4_IB_SQ_HEADROOM(shift)	((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
#define MLX4_IB_SQ_MAX_SPARE		(MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
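/*
 * Worked example (for illustration): with MLX4_IB_MAX_HEADROOM = 2048 bytes
 * and the minimum WQE shift of 6 (64-byte WQEs), MLX4_IB_SQ_HEADROOM(6) is
 * (2048 >> 6) + 1 = 33, so MLX4_IB_SQ_MAX_SPARE reserves 33 spare send WQEs.
 */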
/* module parameter indicating whether the SM assigns the alias GUIDs */
extern int mlx4_ib_sm_guid_assign;
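/*
 * Hedged example (assuming the parameter is exposed under the name
 * "sm_guid_assign", as registered in the driver's main.c):
 *
 *	modprobe mlx4_ib sm_guid_assign=1
 *
 * would request that alias GUIDs be assigned by the subnet manager rather
 * than generated by the driver.
 */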
#define MLX4_IB_UC_STEER_QPN_ALIGN 1
#define MLX4_IB_UC_MAX_NUM_QPS     256
struct mlx4_ib_vma_private_data {
	struct vm_area_struct *vma;
};

struct mlx4_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;
	struct mutex		db_page_mutex;
	struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
	struct list_head	wqn_ranges_list;
	struct mutex		wqn_ranges_mutex; /* protect wqn_ranges_list */
};
struct mlx4_ib_xrcd {
	struct ib_xrcd	ibxrcd;
};
struct mlx4_ib_cq_buf {
	struct mlx4_buf		buf;
	struct mlx4_mtt		mtt;
};

struct mlx4_ib_cq_resize {
	struct mlx4_ib_cq_buf	buf;
};

struct mlx4_ib_cq {
	struct ib_cq		ibcq;
	struct mlx4_cq		mcq;
	struct mlx4_ib_cq_buf	buf;
	struct mlx4_ib_cq_resize *resize_buf;
	struct mutex		resize_mutex;
	struct ib_umem	       *umem;
	struct ib_umem	       *resize_umem;
	/* List of QPs that this CQ serves. */
	struct list_head	send_qp_list;
	struct list_head	recv_qp_list;
};
#define MLX4_MR_PAGES_ALIGN 0x40

struct mlx4_ib_mr {
	struct ib_mr		ibmr;
	struct ib_umem	       *umem;
	size_t			page_map_size;
};

struct mlx4_ib_fmr {
	struct ib_fmr		ibfmr;
	struct mlx4_fmr		mfmr;
};

#define MAX_REGS_PER_FLOW 2

struct mlx4_flow_reg_id {
	u64 id;
	u64 mirror;
};

struct mlx4_ib_flow {
	struct ib_flow ibflow;
	/* translating a DMFS verbs sniffer rule to the FW API requires two reg IDs */
	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
};
enum mlx4_ib_qp_create_flags {
	MLX4_IB_QP_CREATE_ROCE_V2_GSI = IB_QP_CREATE_RESERVED_START
};

enum mlx4_ib_qp_flags {
	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,

	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
	MLX4_IB_SRIOV_TUNNEL_QP = 1 << 30,
	MLX4_IB_SRIOV_SQP = 1 << 31,
};
struct mlx4_ib_gid_entry {
	struct list_head	list;
};
enum mlx4_ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	MLX4_IB_QPT_SMI = IB_QPT_SMI,
	MLX4_IB_QPT_GSI = IB_QPT_GSI,

	MLX4_IB_QPT_RC = IB_QPT_RC,
	MLX4_IB_QPT_UC = IB_QPT_UC,
	MLX4_IB_QPT_UD = IB_QPT_UD,
	MLX4_IB_QPT_RAW_IPV6 = IB_QPT_RAW_IPV6,
	MLX4_IB_QPT_RAW_ETHERTYPE = IB_QPT_RAW_ETHERTYPE,
	MLX4_IB_QPT_RAW_PACKET = IB_QPT_RAW_PACKET,
	MLX4_IB_QPT_XRC_INI = IB_QPT_XRC_INI,
	MLX4_IB_QPT_XRC_TGT = IB_QPT_XRC_TGT,

	MLX4_IB_QPT_PROXY_SMI_OWNER	= 1 << 16,
	MLX4_IB_QPT_PROXY_SMI		= 1 << 17,
	MLX4_IB_QPT_PROXY_GSI		= 1 << 18,
	MLX4_IB_QPT_TUN_SMI_OWNER	= 1 << 19,
	MLX4_IB_QPT_TUN_SMI		= 1 << 20,
	MLX4_IB_QPT_TUN_GSI		= 1 << 21,
};

#define MLX4_IB_QPT_ANY_SRIOV	(MLX4_IB_QPT_PROXY_SMI_OWNER | \
	MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER | \
	MLX4_IB_QPT_TUN_SMI | MLX4_IB_QPT_TUN_GSI)
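/*
 * Illustrative check (not from this header): code that must special-case
 * para-virtualized SR-IOV QPs can test the QP type against the mask, e.g.
 *
 *	if (qp->mlx4_ib_qp_type & MLX4_IB_QPT_ANY_SRIOV)
 *		handle_sriov_special_qp(qp);	// hypothetical helper
 */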
enum mlx4_ib_mad_ifc_flags {
	MLX4_MAD_IFC_IGNORE_MKEY	= 1,
	MLX4_MAD_IFC_IGNORE_BKEY	= 2,
	MLX4_MAD_IFC_IGNORE_KEYS	= (MLX4_MAD_IFC_IGNORE_MKEY |
					   MLX4_MAD_IFC_IGNORE_BKEY),
	MLX4_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX4_NUM_TUNNEL_BUFS		= 256,
};
struct mlx4_ib_tunnel_header {
	struct mlx4_av av;
	__be32 remote_qpn;
	__be32 qkey;
	__be16 vlan;
	u8 mac[6];
	__be16 pkey_index;
};

struct mlx4_rcv_tunnel_hdr {
	__be32 flags_src_qp;	/* flags[6:5] is defined for VLANs:
				 * 0x0  - no VLAN was in the packet
				 * 0x01 - a C-VLAN was in the packet */
	u8 g_ml_path;		/* the gid bit stands for an IPv6/4 header in RoCE */
	__be16 slid_mac_47_32;
};

struct mlx4_ib_proxy_sqp_hdr {
	struct mlx4_rcv_tunnel_hdr tun;
};
struct mlx4_roce_smac_vlan_info {
	int candidate_smac_index;
	int candidate_smac_port;
	int candidate_vlan_index;
	int candidate_vlan_port;
};

struct mlx4_wqn_range {
	struct list_head	list;
};

struct mlx4_ib_rss {
	unsigned int		base_qpn_tbl_sz;
	u8			rss_key[MLX4_EN_RSS_KEY_SIZE];
};
struct mlx4_ib_qp {
	struct ib_qp		ibqp;
	struct mlx4_qp		mqp;
	struct mlx4_ib_wq	rq;

	__be32			sq_signal_bits;
	unsigned		sq_next_wqe;
	int			sq_max_wqes_per_wr;
	struct mlx4_ib_wq	sq;

	enum mlx4_ib_qp_type	mlx4_ib_qp_type;
	struct ib_umem	       *umem;
	struct list_head	gid_list;
	struct list_head	steering_rules;
	struct mlx4_ib_buf     *sqp_proxy_rcv;
	struct mlx4_roce_smac_vlan_info pri;
	struct mlx4_roce_smac_vlan_info alt;
	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct counter_index   *counter_index;
	struct mlx4_wqn_range  *wqn_range;
	/* Number of RSS QP parents that use this WQ */
	struct mlx4_ib_rss     *rss_ctx;
};

struct mlx4_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx4_srq		msrq;
	struct ib_umem	       *umem;
};

struct mlx4_ib_ah {
	struct ib_ah		ibah;
	union mlx4_ext_av	av;
};
/****************************************/
/* alias guid support */
/****************************************/
#define NUM_PORT_ALIAS_GUID		2
#define NUM_ALIAS_GUID_IN_REC		8
#define NUM_ALIAS_GUID_REC_IN_PORT	16
#define GUID_REC_SIZE			8
#define NUM_ALIAS_GUID_PER_PORT		128
#define MLX4_NOT_SET_GUID		(0x00LL)
#define MLX4_GUID_FOR_DELETE_VAL	(~(0x00LL))
enum mlx4_guid_alias_rec_status {
	MLX4_GUID_INFO_STATUS_IDLE,
	MLX4_GUID_INFO_STATUS_SET,
};

#define GUID_STATE_NEED_PORT_INIT 0x01

enum mlx4_guid_alias_rec_method {
	MLX4_GUID_INFO_RECORD_SET	= IB_MGMT_METHOD_SET,
	MLX4_GUID_INFO_RECORD_DELETE	= IB_SA_METHOD_DELETE,
};
struct mlx4_sriov_alias_guid_info_rec_det {
	u8 all_recs[GUID_REC_SIZE * NUM_ALIAS_GUID_IN_REC];
	ib_sa_comp_mask guid_indexes; /* indicates which of the 8 records are valid */
	enum mlx4_guid_alias_rec_status status; /* indicates the administrative status of the record */
	unsigned int guids_retry_schedule[NUM_ALIAS_GUID_IN_REC];
};

struct mlx4_sriov_alias_guid_port_rec_det {
	struct mlx4_sriov_alias_guid_info_rec_det all_rec_per_port[NUM_ALIAS_GUID_REC_IN_PORT];
	struct workqueue_struct *wq;
	struct delayed_work alias_guid_work;
	struct mlx4_sriov_alias_guid *parent;
	struct list_head cb_list;
};

struct mlx4_sriov_alias_guid {
	struct mlx4_sriov_alias_guid_port_rec_det ports_guid[MLX4_MAX_PORTS];
	spinlock_t ag_work_lock;
	struct ib_sa_client *sa_client;
};
struct mlx4_ib_demux_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *dev;
};

struct mlx4_ib_tun_tx_buf {
	struct mlx4_ib_buf buf;
};

struct mlx4_ib_demux_pv_qp {
	enum ib_qp_type proxy_qpt;
	struct mlx4_ib_buf *ring;
	struct mlx4_ib_tun_tx_buf *tx_ring;
};
enum mlx4_ib_demux_pv_state {
	DEMUX_PV_STATE_STARTING,
	DEMUX_PV_STATE_ACTIVE,
	DEMUX_PV_STATE_DOWNING,
};

struct mlx4_ib_demux_pv_ctx {
	enum mlx4_ib_demux_pv_state state;
	struct ib_device *ib_dev;
	struct work_struct work;
	struct workqueue_struct *wq;
	struct workqueue_struct *wi_wq;
	struct mlx4_ib_demux_pv_qp qp[2];
};
struct mlx4_ib_demux_ctx {
	struct ib_device *ib_dev;
	struct workqueue_struct *wq;
	struct workqueue_struct *wi_wq;
	struct workqueue_struct *ud_wq;
	atomic64_t subnet_prefix;
	__be64 guid_cache[128];
	struct mlx4_ib_dev *dev;
	/* the following lock protects both mcg_table and mcg_mgid0_list */
	struct mutex		mcg_table_lock;
	struct rb_root		mcg_table;
	struct list_head	mcg_mgid0_list;
	struct workqueue_struct	*mcg_wq;
	struct mlx4_ib_demux_pv_ctx **tun;
	int    flushing; /* flushing the work queue */
};
struct mlx4_ib_sriov {
	struct mlx4_ib_demux_ctx demux[MLX4_MAX_PORTS];
	struct mlx4_ib_demux_pv_ctx *sqps[MLX4_MAX_PORTS];
	/* when using this spinlock you should use "irq" because
	 * it may be called from interrupt context.
	 */
	spinlock_t going_down_lock;

	struct mlx4_sriov_alias_guid alias_guid;

	/* CM paravirtualization fields */
	struct list_head cm_list;
	spinlock_t id_map_lock;
	struct rb_root sl_id_map;
	struct idr pv_id_table;
};
struct gid_cache_context {
	int real_index;
	int refcount;
};

struct gid_entry {
	enum ib_gid_type gid_type;
	struct gid_cache_context *ctx;
};

struct mlx4_port_gid_table {
	struct gid_entry gids[MLX4_MAX_PORT_GIDS];
};

struct mlx4_ib_iboe {
	struct net_device      *netdevs[MLX4_MAX_PORTS];
	atomic64_t		mac[MLX4_MAX_PORTS];
	struct notifier_block	nb;
	struct mlx4_port_gid_table gids[MLX4_MAX_PORTS];
};

struct pkey_mgt {
	u8			virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	u16			phys_pkey_cache[MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
	struct list_head	pkey_port_list[MLX4_MFUNC_MAX];
	struct kobject	       *device_parent[MLX4_MFUNC_MAX];
};

struct mlx4_ib_iov_sysfs_attr {
	struct kobject *kobj;
	struct device_attribute dentry;
};
struct mlx4_ib_iov_sysfs_attr_ar {
	struct mlx4_ib_iov_sysfs_attr dentries[3 * NUM_ALIAS_GUID_PER_PORT + 1];
};

struct mlx4_ib_iov_port {
	struct mlx4_ib_dev *dev;
	struct list_head list;
	struct mlx4_ib_iov_sysfs_attr_ar *dentr_ar;
	struct ib_port_attr attr;
	struct kobject *cur_port;
	struct kobject *admin_alias_parent;
	struct kobject *gids_parent;
	struct kobject *pkeys_parent;
	struct kobject *mcgs_parent;
	struct mlx4_ib_iov_sysfs_attr mcg_dentry;
};
struct counter_index {
	struct list_head	list;
};

struct mlx4_ib_counters {
	struct list_head	counters_list;
	struct mutex		mutex; /* mutex for accessing counters list */
};

#define MLX4_DIAG_COUNTERS_TYPES 2

struct mlx4_ib_diag_counters {
	const char **name;
	u32 *offset;
	u32 num_counters;
};
struct mlx4_ib_dev {
	struct ib_device	ib_dev;
	struct mlx4_dev	       *dev;
	int			num_ports;
	void __iomem	       *uar_map;

	struct mlx4_uar		priv_uar;
	MLX4_DECLARE_DOORBELL_LOCK(uar_lock);

	struct ib_mad_agent    *send_agent[MLX4_MAX_PORTS][2];
	struct ib_ah	       *sm_ah[MLX4_MAX_PORTS];
	atomic64_t		sl2vl[MLX4_MAX_PORTS];
	struct mlx4_ib_sriov	sriov;

	struct mutex		cap_mask_mutex;
	struct mlx4_ib_iboe	iboe;
	struct mlx4_ib_counters counters_table[MLX4_MAX_PORTS];
	struct kobject	       *iov_parent;
	struct kobject	       *ports_parent;
	struct kobject	       *dev_ports_parent[MLX4_MFUNC_MAX];
	struct mlx4_ib_iov_port	iov_ports[MLX4_MAX_PORTS];
	struct pkey_mgt		pkeys;
	unsigned long	       *ib_uc_qpns_bitmap;
	int			steering_support;
	struct mlx4_ib_qp      *qp1_proxy[MLX4_MAX_PORTS];
	/* lock when destroying qp1_proxy and getting netdev events */
	struct mutex		qp1_proxy_lock[MLX4_MAX_PORTS];
	u8			bond_next_port;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
};
struct ib_event_work {
	struct work_struct	work;
	struct mlx4_ib_dev     *ib_dev;
	struct mlx4_eqe		ib_eqe;
};

struct mlx4_ib_qp_tunnel_init_attr {
	struct ib_qp_init_attr init_attr;
	enum ib_qp_type proxy_qp_type;
};
struct mlx4_uverbs_ex_query_device {
	__u32 comp_mask;
	__u32 reserved;
};

enum query_device_resp_mask {
	QUERY_DEVICE_RESP_MASK_TIMESTAMP = 1UL << 0,
};

struct mlx4_uverbs_ex_query_device_resp {
	__u32 comp_mask;
	__u32 response_length;
	__u64 hca_core_clock_offset;
	__u32 max_inl_recv_sz;
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
}

static inline struct mlx4_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx4_ib_ucontext, ibucontext);
}

static inline struct mlx4_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx4_ib_pd, ibpd);
}

static inline struct mlx4_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx4_ib_xrcd, ibxrcd);
}

static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx4_ib_cq, ibcq);
}

static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)
{
	return container_of(mcq, struct mlx4_ib_cq, mcq);
}

static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx4_ib_mr, ibmr);
}

static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx4_ib_mw, ibmw);
}

static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
}

static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
{
	return container_of(ibflow, struct mlx4_ib_flow, ibflow);
}

static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx4_ib_qp, ibqp);
}

static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
{
	return container_of(mqp, struct mlx4_ib_qp, mqp);
}

static inline struct mlx4_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx4_ib_srq, ibsrq);
}

static inline struct mlx4_ib_srq *to_mibsrq(struct mlx4_srq *msrq)
{
	return container_of(msrq, struct mlx4_ib_srq, msrq);
}

static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx4_ib_ah, ibah);
}
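/*
 * Illustrative usage (not part of this header): the wrappers above recover the
 * driver-private structure from the uverbs object embedded inside it, e.g.
 *
 *	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
 *	struct mlx4_ib_qp  *mqp  = to_mqp(ibqp);
 *
 * which works because ib_dev and ibqp are embedded as the corresponding
 * members of struct mlx4_ib_dev and struct mlx4_ib_qp.
 */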
static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
{
	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;

	return dev->bond_next_port + 1;
}
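/*
 * Worked example (for illustration): on a two-port bonded device
 * (num_ports == 2), bond_next_port alternates between 0 and 1, so successive
 * calls return port numbers 2, 1, 2, 1, ... starting from bond_next_port == 0.
 */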
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);

int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
			struct mlx4_db *db);
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem);
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx4_ib_dereg_mr(struct ib_mr *mr);
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata);
int mlx4_ib_dealloc_mw(struct ib_mw *mw);
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg);
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx4_ib_destroy_cq(struct ib_cq *cq);
int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx4_ib_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);

struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
				struct ib_udata *udata);
int mlx4_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int mlx4_ib_destroy_ah(struct ib_ah *ah);

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx4_ib_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int mlx4_ib_destroy_srq(struct ib_srq *srq);
void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index);
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);

struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_qp(struct ib_qp *qp);
int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);

struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int mr_access_flags,
				 struct ib_fmr_attr *fmr_attr);
int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages,
			 u64 iova);
int mlx4_ib_unmap_fmr(struct list_head *fmr_list);
int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr);
int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
			 struct ib_port_attr *props, int netw_view);
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			 u16 *pkey, int netw_view);

int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			union ib_gid *gid, int netw_view);
static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
{
	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;

	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET)
		return true;

	return !!(ah->av.ib.g_slid & 0x80);
}
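/*
 * For RoCE ports (Ethernet link layer) a GRH is always present, so the helper
 * above returns true unconditionally; for IB ports it checks the 'g' bit
 * (bit 7 of g_slid) in the address vector to see whether a GRH was supplied.
 */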
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx);
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq);
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave);
int mlx4_ib_mcg_init(void);
void mlx4_ib_mcg_destroy(void);

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid);

int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port, int slave,
				  struct ib_sa_mad *sa_mad);
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad);

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid);

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type);

void mlx4_ib_tunnels_update_work(struct work_struct *work);

int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad);

int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
			 u32 qkey, struct rdma_ah_attr *attr, u8 *s_mac,
			 u16 vlan_id, struct ib_mad *mad);
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx);

int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
			     struct ib_mad *mad);

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
				 struct ib_mad *mad);

void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev);
void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave_id);
/* alias guid support */
void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port);
int mlx4_ib_init_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev);
void mlx4_ib_invalidate_all_guid_record(struct mlx4_ib_dev *dev, int port);

void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
					  int block_num,
					  u8 port_num, u8 *p_data);

void mlx4_ib_update_cache_on_guid_change(struct mlx4_ib_dev *dev,
					 int block_num, u8 port_num,
					 u8 *p_data);

int add_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			    struct attribute *attr);
void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
			     struct attribute *attr);
ib_sa_comp_mask mlx4_ib_get_aguid_comp_mask_from_ix(int index);
void mlx4_ib_slave_alias_guid_event(struct mlx4_ib_dev *dev, int slave,
				    int port, int slave_init);

int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *device);

void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device);

__be64 mlx4_ib_gen_node_guid(void);
int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
			 int is_attach);
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
			  u64 start, u64 length, u64 virt_addr,
			  int mr_access_flags, struct ib_pd *pd,
			  struct ib_udata *udata);
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    u8 port_num, int index);

void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
				     int port);

void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port);
struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx4_ib_destroy_wq(struct ib_wq *wq);
int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);

struct ib_rwq_ind_table
*mlx4_ib_create_rwq_ind_table(struct ib_device *device,
			      struct ib_rwq_ind_table_init_attr *init_attr,
			      struct ib_udata *udata);
int mlx4_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);

#endif /* MLX4_IB_H */