/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG

int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

DEFINE_MUTEX(mthca_device_mutex);

#define MTHCA_DEFAULT_NUM_QP            (1 << 16)
#define MTHCA_DEFAULT_RDB_PER_QP        (1 << 2)
#define MTHCA_DEFAULT_NUM_CQ            (1 << 16)
#define MTHCA_DEFAULT_NUM_MCG           (1 << 13)
#define MTHCA_DEFAULT_NUM_MPT           (1 << 17)
#define MTHCA_DEFAULT_NUM_MTT           (1 << 20)
#define MTHCA_DEFAULT_NUM_UDAV          (1 << 15)
#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
#define MTHCA_DEFAULT_NUM_UARC_SIZE     (1 << 18)

static struct mthca_profile hca_profile = {
	.num_qp            = MTHCA_DEFAULT_NUM_QP,
	.rdb_per_qp        = MTHCA_DEFAULT_RDB_PER_QP,
	.num_cq            = MTHCA_DEFAULT_NUM_CQ,
	.num_mcg           = MTHCA_DEFAULT_NUM_MCG,
	.num_mpt           = MTHCA_DEFAULT_NUM_MPT,
	.num_mtt           = MTHCA_DEFAULT_NUM_MTT,
	.num_udav          = MTHCA_DEFAULT_NUM_UDAV,          /* Tavor only */
	.fmr_reserved_mtts = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
	.uarc_size         = MTHCA_DEFAULT_NUM_UARC_SIZE,     /* Arbel only */
};

module_param_named(num_qp, hca_profile.num_qp, int, 0444);
MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");

module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");

module_param_named(num_cq, hca_profile.num_cq, int, 0444);
MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");

module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");

module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(num_mpt,
		"maximum number of memory protection table entries per HCA");

module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(num_mtt,
		"maximum number of memory translation table segments per HCA");

module_param_named(num_udav, hca_profile.num_udav, int, 0444);
MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");

module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
		"number of memory translation table segments reserved for FMR");

static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

static char mthca_version[] =
	DRV_NAME ": Mellanox InfiniBand HCA driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
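
/*
 * Tune PCI settings for maximum throughput: max out the PCI-X read
 * byte count on Tavor, or the PCIe max read request size on
 * Arbel/Sinai.  Only takes effect when the tune_pci module parameter
 * is set.
 */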
static int mthca_tune_pci(struct mthca_dev *mdev)
{
	if (!tune_pci)
		return 0;

	/* First try to max out Read Byte Count */
	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
		if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
			mthca_err(mdev, "Couldn't set PCI-X max read count, aborting.\n");
			return -ENODEV;
		}
	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

	if (pci_is_pcie(mdev->pdev)) {
		if (pcie_set_readrq(mdev->pdev, 4096)) {
			mthca_err(mdev, "Couldn't write PCI Express read request, aborting.\n");
			return -ENODEV;
		}
	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
		mthca_info(mdev, "No PCI Express capability, "
			   "not setting Max Read Request Size.\n");

	return 0;
}
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;

	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;

	err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
		return err;
	}
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
			  "PCI resource 2 size of 0x%llx, aborting.\n",
			  dev_lim->uar_size,
			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
		return -ENODEV;
	}

	mdev->limits.num_ports          = dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len      = dev_lim->max_gids;
	mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	/*
	 * Need to allow for worst case send WQE overhead and check
	 * whether max_desc_sz imposes a lower limit than max_sg; UD
	 * send has the biggest overhead.
	 */
	mdev->limits.max_sg             = min_t(int, dev_lim->max_sg,
						(dev_lim->max_desc_sz -
						 sizeof (struct mthca_next_seg) -
						 (mthca_is_memfree(mdev) ?
						  sizeof (struct mthca_arbel_ud_seg) :
						  sizeof (struct mthca_tavor_ud_seg))) /
						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge        = mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
	mdev->limits.port_width_cap     = dev_lim->max_port_width;
	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags              = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	if (mthca_is_memfree(mdev))
		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	return 0;
}
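
/*
 * Bring up a Tavor (or Arbel in Tavor compatibility mode) HCA: enable
 * the device, query firmware and attached DDR, build a resource
 * profile from the module parameters and run INIT_HCA.
 */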
static int mthca_init_tavor(struct mthca_dev *mdev)
{
	s64 size;
	int err;
	struct mthca_dev_lim dev_lim;
	struct mthca_profile profile;
	struct mthca_init_hca_param init_hca;

	err = mthca_SYS_EN(mdev);
	if (err) {
		mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
		return err;
	}

	err = mthca_QUERY_FW(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_FW command returned %d, aborting.\n", err);
		goto err_disable;
	}
	err = mthca_QUERY_DDR(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
		goto err_disable;
	}

	profile = hca_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_disable;
	}

	return 0;

err_disable:
	mthca_SYS_DIS(mdev);
	return err;
}
static int mthca_load_fw(struct mthca_dev *mdev)
{
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
	if (err) {
		mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
		goto err_free;
	}
	err = mthca_RUN_FW(mdev);
	if (err) {
		mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}
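
/*
 * Allocate and map the InfiniHost Context Memory (ICM) tables that a
 * MemFree HCA keeps in host memory: MTT, MPT, QP, EQP, RDB, CQ, SRQ
 * and multicast group context.
 */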
static int mthca_init_icm(struct mthca_dev *mdev,
			  struct mthca_dev_lim *dev_lim,
			  struct mthca_init_hca_param *init_hca,
			  u64 icm_size)
{
	u64 aux_pages;
	int err;

	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
		return err;
	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 mdev->limits.mtt_seg_size,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						     dev_lim->cqc_entry_sz,
						     mdev->limits.num_cqs,
						     mdev->limits.reserved_cqs,
						     0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
		mdev->srq_table.table =
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
			err = -ENOMEM;
			goto err_unmap_cq;
		}
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}
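
/* Tear down everything mthca_init_icm() set up, in reverse order. */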
static void mthca_free_icms(struct mthca_dev *mdev)
{
	mthca_free_icm_table(mdev, mdev->mcg_table.table);
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);
	mthca_free_icm_table(mdev, mdev->cq_table.table);
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
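
/*
 * Bring up a MemFree (Arbel/Sinai) HCA: start firmware from host
 * memory, size and map the ICM, then run INIT_HCA.
 */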
static int mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim dev_lim;
	struct mthca_profile profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
	int err;

	err = mthca_QUERY_FW(mdev);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
		return err;
	}

	err = mthca_ENABLE_LAM(mdev);
	if (err == -EAGAIN) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (err) {
		mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
		return err;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
		goto err_stop_fw;
	}

	profile = hca_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca);
	if (err) {
		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
		goto err_free_icm;
	}

	return 0;

err_free_icm:
	mthca_free_icms(mdev);

err_stop_fw:
	mthca_UNMAP_FA(mdev);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev);

	return err;
}
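
/* Shut the HCA down and release firmware/ICM resources. */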
static void mthca_close_hca(struct mthca_dev *mdev)
{
	mthca_CLOSE_HCA(mdev, 0);

	if (mthca_is_memfree(mdev)) {
		mthca_free_icms(mdev);

		mthca_UNMAP_FA(mdev);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
			mthca_DISABLE_LAM(mdev);
	} else
		mthca_SYS_DIS(mdev);
}
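
/*
 * Common HCA init: dispatch to the Arbel or Tavor bring-up path, then
 * query the adapter for its interrupt pin, revision and board ID.
 */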
static int mthca_init_hca(struct mthca_dev *mdev)
{
	int err;
	struct mthca_adapter adapter;

	if (mthca_is_memfree(mdev))
		err = mthca_init_arbel(mdev);
	else
		err = mthca_init_tavor(mdev);

	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}
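
/*
 * Set up all driver resource tables (UAR, PD, MR, EQ, CQ, SRQ, QP, AV,
 * MCG), switch firmware commands to event mode and verify interrupt
 * delivery with a NOP command.
 */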
static int mthca_setup_hca(struct mthca_dev *dev)
{
	int err;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_err(dev, "Couldn't map kernel access region, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mthca_init_pd_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mthca_init_mr_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
	if (err) {
		mthca_err(dev, "Failed to create driver PD, "
			  "aborting.\n");
		goto err_mr_table_free;
	}

	err = mthca_init_eq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "event queue table, aborting.\n");
		goto err_pd_free;
	}

	err = mthca_cmd_use_events(dev);
	if (err) {
		mthca_err(dev, "Failed to switch to event-driven "
			  "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mthca_NOP(dev);
	if (err) {
		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
			mthca_warn(dev, "NOP command failed to generate interrupt "
				   "(IRQ %d).\n",
				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
		} else {
			mthca_err(dev, "NOP command failed to generate interrupt "
				  "(IRQ %d), aborting.\n",
				  dev->pdev->irq);
			mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mthca_dbg(dev, "NOP command IRQ test passed\n");

	err = mthca_init_cq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mthca_init_srq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mthca_init_qp_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mthca_init_av_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "address vector table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mthca_init_mcg_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "multicast group table, aborting.\n");
		goto err_av_table_free;
	}

	return 0;

err_av_table_free:
	mthca_cleanup_av_table(dev);

err_qp_table_free:
	mthca_cleanup_qp_table(dev);

err_srq_table_free:
	mthca_cleanup_srq_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);

err_cmd_poll:
	mthca_cmd_use_polling(dev);

err_eq_table_free:
	mthca_cleanup_eq_table(dev);

err_pd_free:
	mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
	mthca_cleanup_mr_table(dev);

err_pd_table_free:
	mthca_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(dev->kar);

err_uar_free:
	mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}
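
/*
 * Request the three MSI-X vectors (completion, async and command EQs)
 * and record them in the EQ table.
 */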
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
	struct msix_entry entries[3];
	int err;

	entries[0].entry = 0;
	entries[1].entry = 1;
	entries[2].entry = 2;

	err = pci_enable_msix_exact(mdev->pdev, entries, ARRAY_SIZE(entries));
	if (err)
		return err;

	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

	return 0;
}

/* Types of supported HCA */
enum {
	TAVOR,			/* MT23108                        */
	ARBEL_COMPAT,		/* MT25208 in Tavor compat mode   */
	ARBEL_NATIVE,		/* MT25208 with extended features */
	SINAI			/* MT25204 */
};

#define MTHCA_FW_VER(major, minor, subminor) \
	(((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))

static struct {
	u64 latest_fw;
	u32 flags;
} mthca_hca_table[] = {
	[TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
			   .flags     = 0 },
	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
			   .flags     = MTHCA_FLAG_PCIE },
	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE },
	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE    |
					MTHCA_FLAG_SINAI_OPT }
};
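
/*
 * Probe one HCA: enable the PCI device, validate BARs, set DMA masks,
 * reset and initialize the HCA, then register it with the IB core.
 */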
static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
{
	int ddr_hidden = 0;
	int err;
	struct mthca_dev *mdev;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
		ddr_hidden = 1;

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_free_res;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_free_res;
		}
	}

	/* We can handle large RDMA requests, so allow larger segments. */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
	if (!mdev) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	mdev->pdev = pdev;

	mdev->mthca_flags = mthca_hca_table[hca_type].flags;
	if (ddr_hidden)
		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mthca_reset(mdev);
	if (err) {
		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	err = mthca_cmd_init(mdev);
	if (err) {
		mthca_err(mdev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mthca_tune_pci(mdev);
	if (err)
		goto err_cmd;

	err = mthca_init_hca(mdev);
	if (err)
		goto err_cmd;

	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
		mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
			   (int) (mdev->fw_ver & 0xffff),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 32),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
			   (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
	}

	if (msi_x && !mthca_enable_msi_x(mdev))
		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;

	err = mthca_setup_hca(mdev);
	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_disable_msix(pdev);
		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;

		err = mthca_setup_hca(mdev);
	}

	if (err)
		goto err_close;

	err = mthca_register_device(mdev);
	if (err)
		goto err_cleanup;

	err = mthca_create_agents(mdev);
	if (err)
		goto err_unregister;

	pci_set_drvdata(pdev, mdev);
	mdev->hca_type = hca_type;

	mdev->active = true;

	return 0;

err_unregister:
	mthca_unregister_device(mdev);

err_cleanup:
	mthca_cleanup_mcg_table(mdev);
	mthca_cleanup_av_table(mdev);
	mthca_cleanup_qp_table(mdev);
	mthca_cleanup_srq_table(mdev);
	mthca_cleanup_cq_table(mdev);
	mthca_cmd_use_polling(mdev);
	mthca_cleanup_eq_table(mdev);

	mthca_pd_free(mdev, &mdev->driver_pd);

	mthca_cleanup_mr_table(mdev);
	mthca_cleanup_pd_table(mdev);
	mthca_cleanup_uar_table(mdev);

err_close:
	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mthca_close_hca(mdev);

err_cmd:
	mthca_cmd_cleanup(mdev);

err_free_dev:
	ib_dealloc_device(&mdev->ib_dev);

err_free_res:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
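
/* Undo __mthca_init_one(): tear down IB registration, driver tables and PCI state. */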
static void __mthca_remove_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	int p;

	if (mdev) {
		mthca_free_agents(mdev);
		mthca_unregister_device(mdev);

		for (p = 1; p <= mdev->limits.num_ports; ++p)
			mthca_CLOSE_IB(mdev, p);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
		mthca_cleanup_qp_table(mdev);
		mthca_cleanup_srq_table(mdev);
		mthca_cleanup_cq_table(mdev);
		mthca_cmd_use_polling(mdev);
		mthca_cleanup_eq_table(mdev);

		mthca_pd_free(mdev, &mdev->driver_pd);

		mthca_cleanup_mr_table(mdev);
		mthca_cleanup_pd_table(mdev);

		iounmap(mdev->kar);
		mthca_uar_free(mdev, &mdev->driver_uar);
		mthca_cleanup_uar_table(mdev);
		mthca_close_hca(mdev);
		mthca_cmd_cleanup(mdev);

		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_disable_msix(pdev);

		ib_dealloc_device(&mdev->ib_dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
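
/* Tear the HCA down and probe it again, keeping the same HCA type. */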
int __mthca_restart_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev;
	int hca_type;

	mdev = pci_get_drvdata(pdev);
	if (!mdev)
		return -ENODEV;
	hca_type = mdev->hca_type;
	__mthca_remove_one(pdev);
	return __mthca_init_one(pdev, hca_type);
}

static int mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	mutex_lock(&mthca_device_mutex);

	printk_once(KERN_INFO "%s", mthca_version);

	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
		       pci_name(pdev), id->driver_data);
		mutex_unlock(&mthca_device_mutex);
		return -ENODEV;
	}

	ret = __mthca_init_one(pdev, id->driver_data);

	mutex_unlock(&mthca_device_mutex);

	return ret;
}

static void mthca_remove_one(struct pci_dev *pdev)
{
	mutex_lock(&mthca_device_mutex);
	__mthca_remove_one(pdev);
	mutex_unlock(&mthca_device_mutex);
}

static struct pci_device_id mthca_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mthca_pci_table);

static struct pci_driver mthca_driver = {
	.name		= DRV_NAME,
	.id_table	= mthca_pci_table,
	.probe		= mthca_init_one,
	.remove		= mthca_remove_one,
};
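
/*
 * Clamp a profile module parameter to a positive power of two, falling
 * back to the built-in default when the given value is unusable.
 */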
static void __init __mthca_check_profile_val(const char *name, int *pval,
					     int pval_default)
{
	/* value must be positive and power of 2 */
	int old_pval = *pval;

	if (old_pval <= 0)
		*pval = pval_default;
	else
		*pval = roundup_pow_of_two(old_pval);

	if (old_pval != *pval) {
		printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n",
		       old_pval, name);
		printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, *pval);
	}
}

#define mthca_check_profile_val(name, default)				\
	__mthca_check_profile_val(#name, &hca_profile.name, default)

static void __init mthca_validate_profile(void)
{
	mthca_check_profile_val(num_qp,            MTHCA_DEFAULT_NUM_QP);
	mthca_check_profile_val(rdb_per_qp,        MTHCA_DEFAULT_RDB_PER_QP);
	mthca_check_profile_val(num_cq,            MTHCA_DEFAULT_NUM_CQ);
	mthca_check_profile_val(num_mcg,           MTHCA_DEFAULT_NUM_MCG);
	mthca_check_profile_val(num_mpt,           MTHCA_DEFAULT_NUM_MPT);
	mthca_check_profile_val(num_mtt,           MTHCA_DEFAULT_NUM_MTT);
	mthca_check_profile_val(num_udav,          MTHCA_DEFAULT_NUM_UDAV);
	mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS);

	if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) {
		printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n",
		       hca_profile.fmr_reserved_mtts);
		printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n",
		       hca_profile.num_mtt);
		hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2;
		printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
		       hca_profile.fmr_reserved_mtts);
	}

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
		printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
		       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
		log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
	}
}

static int __init mthca_init(void)
{
	int ret;

	mthca_validate_profile();

	ret = mthca_catas_init();
	if (ret)
		return ret;

	ret = pci_register_driver(&mthca_driver);
	if (ret < 0) {
		mthca_catas_cleanup();
		return ret;
	}

	return 0;
}

static void __exit mthca_cleanup(void)
{
	pci_unregister_driver(&mthca_driver);
	mthca_catas_cleanup();
}

module_init(mthca_init);
module_exit(mthca_cleanup);