/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>

#include "fw.h"
#include "icm.h"
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
static bool enable_qos = true;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		u64 val;					      \
		switch (sizeof(dest)) {				      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: val = get_unaligned((u64 *)__p);	      \
			(dest) = be64_to_cpu(val);  break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
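/*
 * Usage sketch (illustrative, not part of the driver): the macros
 * dispatch on sizeof() to pick the right big-endian conversion, so a
 * caller just names a destination variable of the matching width and a
 * byte offset into the mailbox.  The offsets below are hypothetical.
 *
 *	u8 field;
 *	u32 qpn;
 *
 *	MLX4_GET(field, outbox, 0x13);     1-byte read, no byte swap
 *	MLX4_GET(qpn, outbox, 0x10);       4-byte read, be32 to cpu
 *	qpn &= 0xFFFFFF;                   QP numbers are 24 bits wide
 *	MLX4_PUT(inbox, qpn, 0x10);        cpu to be32 write-back
 *
 * A destination whose size is not 1/2/4/8 bytes falls through to
 * __buggy_use_of_MLX4_GET(), which is declared but never defined, so
 * the misuse is caught as a link-time error rather than silently
 * reading the wrong width.
 */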
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[37] = "Wake On LAN (port1) support",
		[38] = "Wake On LAN (port2) support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[52] = "RSS IP fragments support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support",
		[16] = "CONFIG DEV support",
		[17] = "Asymmetric EQs support",
		[18] = "More than 80 VFs support",
		[19] = "Performance optimized for limited rule configuration flow steering support",
		[20] = "Recoverable error events support",
		[21] = "Port Remap support",
		[22] = "QCN support",
		[23] = "QP rate limiting support",
		[24] = "Ethernet Flow control statistics support",
		[25] = "Granular QoS per VF support",
		[26] = "Port ETS Scheduler support",
		[27] = "Port beacon support",
		[28] = "RX-ALL support",
		[29] = "802.1ad offload support",
		[31] = "Modifying loopback source checks using UPDATE_QP support",
		[32] = "Loopback source checks support",
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
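/*
 * Example (hypothetical value): a flags2 word of 0x0000000000200022 has
 * bits 1, 5 and 21 set, so the loop above would log "RSS Toeplitz Hash
 * Function support", "Time stamping support" and "Port Remap support";
 * unset or unnamed bits are silently skipped.
 */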
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
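/*
 * Caller-side sketch (values are illustrative): the PF can ask the
 * firmware to reprogram its statically configured page size before
 * INIT_HCA by setting the modify bit together with the new log2 size:
 *
 *	struct mlx4_mod_stat_cfg cfg;
 *	int err;
 *
 *	cfg.log_pg_sz_m = 1;	modify bit for the page-size field
 *	cfg.log_pg_sz = 0;	log2 page size to program
 *	err = mlx4_MOD_STAT_CFG(dev, &cfg);
 */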
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8 field, port;
	u32 size, proxy_qp, qkey;
	int err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	(1 << 31)
#define QUERY_FUNC_CAP_PHV_BIT			0x40
	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
			dev, slave, vhcr->in_modifier);

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

		if (dev->caps.phv_bit[port]) {
			field = QUERY_FUNC_CAP_PHV_BIT;
			MLX4_PUT(outbox->buf, field,
				 QUERY_FUNC_CAP_FLAGS0_OFFSET);
		}
	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
	} else
		err = -EINVAL;

	return err;
}
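/*
 * Worked example of the special-QP numbering used above (all values
 * hypothetical): each slave owns a block of 8 proxy/tunnel QPs laid
 * out per port.  With base_proxy_sqpn = 0x100, slave = 2, port = 1:
 *
 *	qp0 proxy = 0x100 + 8 * 2 + 1 - 1 = 0x110
 *	qp1 proxy = 0x110 + 2             = 0x112
 *
 * and the tunnel QPs follow the same +2 stride from base_tunnel_sqpn.
 */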
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;
	u32 in_modifier;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	in_modifier = op_modifier ? gen_or_port :
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
			func_cap->reserved_lkey = size;
		} else {
			func_cap->reserved_lkey = 0;
		}

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}
	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -EINVAL;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->qp0_qkey = qkey;
	} else {
		func_cap->qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->qp0_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->qp0_proxy_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->qp1_tunnel_qpn = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->qp1_proxy_qpn = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
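/*
 * Caller-side sketch (assumed call pattern): a VF driver typically
 * issues the general query first (gen_or_port == 0) and then one
 * per-port query for each port it was told about:
 *
 *	struct mlx4_func_cap func_cap;
 *	int port, err;
 *
 *	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
 *	for (port = 1; !err && port <= func_cap.num_ports; port++)
 *		err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
 */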
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;
#define QUERY_DEV_CAP_OUT_SIZE		       0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_PORT_BEACON_OFFSET	0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2
	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET);
	dev_cap->max_mrw_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET);
	dev_cap->max_mtt_seg = 1 << (field & 0x3f);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 4))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
	if (field & 0x40)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & (1 << 3))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
	dev_cap->rl_caps.num_rates = size;
	if (dev_cap->rl_caps.num_rates) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
		dev_cap->rl_caps.max_val = size & 0xfff;
		dev_cap->rl_caps.max_unit = size >> 14;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
		dev_cap->rl_caps.min_val = size & 0xfff;
		dev_cap->rl_caps.min_unit = size >> 14;
	}
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 18))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
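/*
 * Worked example of the log2 field encoding parsed above (value is
 * hypothetical): QUERY_DEV_CAP packs most limits as log2 values in a
 * few bits, so a MAX_QP byte of 0x17 decodes as
 *
 *	max_qps = 1 << (0x17 & 0x1f) = 1 << 23 = 8388608
 *
 * which is why nearly every capability is expanded with
 * "1 << (field & mask)" rather than being read directly.
 */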
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");
1009 mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
1010 dev_cap->bmme_flags, dev_cap->reserved_lkey);
1011 mlx4_dbg(dev, "Max ICM size %lld MB\n",
1012 (unsigned long long) dev_cap->max_icm_sz >> 20);
1013 mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
1014 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
1015 mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
1016 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
1017 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
1018 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
1019 mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
1020 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
1021 dev_cap->eqc_entry_sz);
1022 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
1023 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
1024 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
1025 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
1026 mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
1027 dev_cap->max_pds, dev_cap->reserved_mgms);
1028 mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
1029 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
1030 mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
1031 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
1032 dev_cap->port_cap[1].max_port_width);
1033 mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
1034 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
1035 mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
1036 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
1037 mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
1038 mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
1039 mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
1040 mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
1041 dev_cap->dmfs_high_rate_qpn_base);
1042 mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
1043 dev_cap->dmfs_high_rate_qpn_range);
	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
		struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;

		mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
			 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
			 rl_caps->min_unit, rl_caps->min_val);
	}

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);
}
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys = 1 << (field & 0xf);
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20
		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids = 1 << (field >> 4);
		port_cap->max_pkeys = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS	(1 << 28)
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL	(1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;
	u16	field16;
	u32	bmme_flags, field32;
	int	real_port;
	int	slave_port;
	int	first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;
	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);

	/* Not exposing RSS IP fragments to guests */
	flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling and QoS support */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xd7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, disable port BEACON */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
	/* For guests, disable mw type 2 and port remap */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	/* turn off QCN for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	field &= 0xfe;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);

	/* turn off QP max-rate limiting for guests */
	field16 = 0;
	MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);

	/* turn off QoS per VF support for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	field &= 0xef;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);

	/* turn off ignore FCS feature for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	field &= 0xfb;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);

	return 0;
}
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;
	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}

	return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}
		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B,
					       MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
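/*
 * Worked example of the alignment trick above (addresses are
 * hypothetical): for a chunk at 0x230000 of size 0x50000,
 *
 *	lg = ffs(0x230000 | 0x50000) - 1 = ffs(0x270000) - 1 = 16
 *
 * so the chunk is described as five 64 KB pages, the largest
 * power-of-two unit that divides both the address and the size.  Each
 * mailbox entry is 16 bytes (two be64 words), so one mailbox carries
 * up to MLX4_MAILBOX_SIZE / 16 page descriptors before the command has
 * to be flushed and the mailbox refilled.
 */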
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE		0x100
#define QUERY_FW_VER_OFFSET		0x00
#define QUERY_FW_PPF_ID			0x09
#define QUERY_FW_CMD_IF_REV_OFFSET	0x0a
#define QUERY_FW_MAX_CMD_OFFSET		0x0f
#define QUERY_FW_ERR_START_OFFSET	0x30
#define QUERY_FW_ERR_SIZE_OFFSET	0x38
#define QUERY_FW_ERR_BAR_OFFSET		0x3c

#define QUERY_FW_SIZE_OFFSET		0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET	0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET	0x28

#define QUERY_FW_COMM_BASE_OFFSET	0x40
#define QUERY_FW_COMM_BAR_OFFSET	0x48

#define QUERY_FW_CLOCK_OFFSET		0x50
#define QUERY_FW_CLOCK_BAR		0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);
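	/*
	 * Worked example (hypothetical raw value): the firmware reports
	 * 0x0002_13ec_0023, i.e. major 2 with subminor 0x13ec (5100)
	 * and minor 0x23 (35) in the wrong order; after the swap fw_ver
	 * is 0x0002_0023_13ec, which prints as "2.35.5100" below.
	 */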
	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;
	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);
	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar, outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);
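	/*
	 * The "(bar >> 6) * 2" idiom above (reused for the clr_int,
	 * comm and clock BARs below) turns the firmware's BAR field
	 * into a Linux PCI resource index: e.g. a raw value of 0x40
	 * yields (0x40 >> 6) * 2 = 2, and each 64-bit BAR occupies two
	 * 32-bit slots in the PCI resource table, hence the doubling.
	 */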
	MLX4_GET(fw->fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar, outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);
	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
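	/*
	 * Worked example (hypothetical count): with 4 KB ICM pages on a
	 * 64 KB PAGE_SIZE kernel, 23 ICM pages become
	 * ALIGN(23, 16) >> 4 = 32 >> 4 = 2 system pages; on a 4 KB
	 * kernel both ratios are 1 and the value is unchanged.
	 */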
1579 mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
1580 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);
1583 mlx4_free_cmd_mailbox(dev, mailbox);
int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}
static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		u32 *bid_u32 = (u32 *)board_id;

		for (i = 0; i < 4; ++i) {
			u32 *addr;
			u32 val;

			addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
			val = get_unaligned(addr);
			val = swab32(val);
			put_unaligned(val, &bid_u32[i]);
		}
	}
}
1647 int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
1649 struct mlx4_cmd_mailbox *mailbox;
1653 #define QUERY_ADAPTER_OUT_SIZE 0x100
1654 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10
1655 #define QUERY_ADAPTER_VSD_OFFSET 0x20
1657 mailbox = mlx4_alloc_cmd_mailbox(dev);
1658 if (IS_ERR(mailbox))
1659 return PTR_ERR(mailbox);
1660 outbox = mailbox->buf;
1662 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
1663 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1667 MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);
1669 get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
1670 adapter->board_id);
1672 out:
1673 mlx4_free_cmd_mailbox(dev, mailbox);
1674 return err;
1677 int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
1679 struct mlx4_cmd_mailbox *mailbox;
1680 __be32 *inbox;
1681 int err;
1682 static const u8 a0_dmfs_hw_steering[] = {
1683 [MLX4_STEERING_DMFS_A0_DEFAULT] = 0,
1684 [MLX4_STEERING_DMFS_A0_DYNAMIC] = 1,
1685 [MLX4_STEERING_DMFS_A0_STATIC] = 2,
1686 [MLX4_STEERING_DMFS_A0_DISABLE] = 3
1689 #define INIT_HCA_IN_SIZE 0x200
1690 #define INIT_HCA_VERSION_OFFSET 0x000
1691 #define INIT_HCA_VERSION 2
1692 #define INIT_HCA_VXLAN_OFFSET 0x0c
1693 #define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
1694 #define INIT_HCA_FLAGS_OFFSET 0x014
1695 #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
1696 #define INIT_HCA_QPC_OFFSET 0x020
1697 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
1698 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17)
1699 #define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28)
1700 #define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
1701 #define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
1702 #define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
1703 #define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
1704 #define INIT_HCA_EQE_CQE_STRIDE_OFFSET (INIT_HCA_QPC_OFFSET + 0x3b)
1705 #define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
1706 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
1707 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
1708 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67)
1709 #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a)
1710 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70)
1711 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77)
1712 #define INIT_HCA_MCAST_OFFSET 0x0c0
1713 #define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00)
1714 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x13)
1715 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x17)
1716 #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
1717 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
1718 #define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
1719 #define INIT_HCA_FS_PARAM_OFFSET 0x1d0
1720 #define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
1721 #define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x13)
1722 #define INIT_HCA_FS_A0_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x18)
1723 #define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
1724 #define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
1725 #define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
1726 #define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
1727 #define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
1728 #define INIT_HCA_TPT_OFFSET 0x0f0
1729 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
1730 #define INIT_HCA_TPT_MW_OFFSET (INIT_HCA_TPT_OFFSET + 0x08)
1731 #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
1732 #define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10)
1733 #define INIT_HCA_CMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x18)
1734 #define INIT_HCA_UAR_OFFSET 0x120
1735 #define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a)
1736 #define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b)
1738 mailbox = mlx4_alloc_cmd_mailbox(dev);
1739 if (IS_ERR(mailbox))
1740 return PTR_ERR(mailbox);
1741 inbox = mailbox->buf;
1743 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
1745 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
1746 (ilog2(cache_line_size()) - 4) << 5;
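/* Example encodings of the cache-line byte above: a 64-byte line gives
 * (ilog2(64) - 4) << 5 = 0x40, and a 128-byte line gives 0x60; the
 * cache-line code occupies the top three bits of the byte.
 */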
1748 #if defined(__LITTLE_ENDIAN)
1749 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
1750 #elif defined(__BIG_ENDIAN)
1751 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
1752 #else
1753 #error Host endianness not defined
1754 #endif
1755 /* Check port for UD address vector: */
1756 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);
1758 /* Enable IPoIB checksumming if we can: */
1759 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
1760 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);
1762 /* Enable QoS support if module parameter set */
1763 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
1764 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
1766 /* enable counters */
1767 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
1768 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
1770 /* Enable RSS spread to fragmented IP packets when supported */
1771 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
1772 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);
1774 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
1775 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
1776 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
1777 dev->caps.eqe_size = 64;
1778 dev->caps.eqe_factor = 1;
1779 } else {
1780 dev->caps.eqe_size = 32;
1781 dev->caps.eqe_factor = 0;
1784 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
1785 *(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
1786 dev->caps.cqe_size = 64;
1787 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
1788 } else {
1789 dev->caps.cqe_size = 32;
1792 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
1793 if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
1794 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
1795 dev->caps.eqe_size = cache_line_size();
1796 dev->caps.cqe_size = cache_line_size();
1797 dev->caps.eqe_factor = 0;
1798 MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
1799 (ilog2(dev->caps.eqe_size) - 5)),
1800 INIT_HCA_EQE_CQE_STRIDE_OFFSET);
1802 /* Userspace still needs to know that it must support CQE > 32B */
1803 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
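/* Worked example: with a 128-byte cache line, both EQEs and CQEs become
 * 128 bytes, ilog2(128) - 5 = 2, and the byte written above encodes
 * (2 << 4) | 2 = 0x22.
 */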
1806 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
1807 *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);
1809 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1811 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
1812 MLX4_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET);
1813 MLX4_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET);
1814 MLX4_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET);
1815 MLX4_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET);
1816 MLX4_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET);
1817 MLX4_PUT(inbox, param->altc_base, INIT_HCA_ALTC_BASE_OFFSET);
1818 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET);
1819 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET);
1820 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET);
1821 MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET);
1822 MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
1823 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
1825 /* steering attributes */
1826 if (dev->caps.steering_mode ==
1827 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1828 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
1829 cpu_to_be32(1 <<
1830 INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
1832 MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
1833 MLX4_PUT(inbox, param->log_mc_entry_sz,
1834 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1835 MLX4_PUT(inbox, param->log_mc_table_sz,
1836 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1837 /* Enable Ethernet flow steering
1838 * with UDP unicast and TCP unicast
1839 */
1840 if (dev->caps.dmfs_high_steer_mode !=
1841 MLX4_STEERING_DMFS_A0_STATIC)
1842 MLX4_PUT(inbox,
1843 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1844 INIT_HCA_FS_ETH_BITS_OFFSET);
1845 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1846 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
1847 /* Enable IPoIB flow steering
1848 * with UDP unicast and TCP unicast
1849 */
1850 MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
1851 INIT_HCA_FS_IB_BITS_OFFSET);
1852 MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
1853 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
1855 if (dev->caps.dmfs_high_steer_mode !=
1856 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
1857 MLX4_PUT(inbox,
1858 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
1859 << 6)),
1860 INIT_HCA_FS_A0_OFFSET);
1861 } else {
1862 MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
1863 MLX4_PUT(inbox, param->log_mc_entry_sz,
1864 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1865 MLX4_PUT(inbox, param->log_mc_hash_sz,
1866 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1867 MLX4_PUT(inbox, param->log_mc_table_sz,
1868 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1869 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
1870 MLX4_PUT(inbox, (u8) (1 << 3),
1871 INIT_HCA_UC_STEERING_OFFSET);
1874 /* TPT attributes */
1876 MLX4_PUT(inbox, param->dmpt_base, INIT_HCA_DMPT_BASE_OFFSET);
1877 MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
1878 MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
1879 MLX4_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
1880 MLX4_PUT(inbox, param->cmpt_base, INIT_HCA_CMPT_BASE_OFFSET);
1882 /* UAR attributes */
1884 MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
1885 MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
1887 /* set parser VXLAN attributes */
1888 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
1889 u8 parser_params = 0;
1890 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
1893 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
1894 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1896 if (err)
1897 mlx4_err(dev, "INIT_HCA returns %d\n", err);
1899 mlx4_free_cmd_mailbox(dev, mailbox);
1900 return err;
1903 int mlx4_QUERY_HCA(struct mlx4_dev *dev,
1904 struct mlx4_init_hca_param *param)
1906 struct mlx4_cmd_mailbox *mailbox;
1907 __be32 *outbox;
1908 u64 qword_field;
1909 u32 dword_field;
1910 u16 word_field;
1911 u8 byte_field;
1912 int err;
1913 static const u8 a0_dmfs_query_hw_steering[] = {
1914 [0] = MLX4_STEERING_DMFS_A0_DEFAULT,
1915 [1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
1916 [2] = MLX4_STEERING_DMFS_A0_STATIC,
1917 [3] = MLX4_STEERING_DMFS_A0_DISABLE
1920 #define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
1921 #define QUERY_HCA_CORE_CLOCK_OFFSET 0x0c
1923 mailbox = mlx4_alloc_cmd_mailbox(dev);
1924 if (IS_ERR(mailbox))
1925 return PTR_ERR(mailbox);
1926 outbox = mailbox->buf;
1928 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
1929 MLX4_CMD_QUERY_HCA,
1930 MLX4_CMD_TIME_CLASS_B,
1931 !mlx4_is_slave(dev));
1935 MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
1936 MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
1938 /* QPC/EEC/CQC/EQC/RDMARC attributes */
1940 MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
1941 param->qpc_base = qword_field & ~((u64)0x1f);
1942 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
1943 param->log_num_qps = byte_field & 0x1f;
1944 MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
1945 param->srqc_base = qword_field & ~((u64)0x1f);
1946 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
1947 param->log_num_srqs = byte_field & 0x1f;
1948 MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
1949 param->cqc_base = qword_field & ~((u64)0x1f);
1950 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
1951 param->log_num_cqs = byte_field & 0x1f;
1952 MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
1953 param->altc_base = qword_field;
1954 MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
1955 param->auxc_base = qword_field;
1956 MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
1957 param->eqc_base = qword_field & ~((u64)0x1f);
1958 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
1959 param->log_num_eqs = byte_field & 0x1f;
1960 MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
1961 param->num_sys_eqs = word_field & 0xfff;
1962 MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
1963 param->rdmarc_base = qword_field & ~((u64)0x1f);
1964 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
1965 param->log_rd_per_qp = byte_field & 0x7;
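/* Note on the ~0x1f masks above: each ICM base address shares its qword
 * with a 5-bit log-size field (e.g. INIT_HCA_LOG_QP_OFFSET is the last
 * byte of the qword at INIT_HCA_QPC_BASE_OFFSET), so the low 5 bits are
 * stripped to recover the 32-byte-aligned base.
 */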
1967 MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
1968 if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
1969 param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
1970 } else {
1971 MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
1972 if (byte_field & 0x8)
1973 param->steering_mode = MLX4_STEERING_MODE_B0;
1974 else
1975 param->steering_mode = MLX4_STEERING_MODE_A0;
1978 if (dword_field & (1 << 13))
1979 param->rss_ip_frags = 1;
1981 /* steering attributes */
1982 if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
1983 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
1984 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
1985 param->log_mc_entry_sz = byte_field & 0x1f;
1986 MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
1987 param->log_mc_table_sz = byte_field & 0x1f;
1988 MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
1989 param->dmfs_high_steer_mode =
1990 a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
1991 } else {
1992 MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
1993 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
1994 param->log_mc_entry_sz = byte_field & 0x1f;
1995 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
1996 param->log_mc_hash_sz = byte_field & 0x1f;
1997 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
1998 param->log_mc_table_sz = byte_field & 0x1f;
2001 /* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
2002 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
2003 if (byte_field & 0x20) /* 64-bytes eqe enabled */
2004 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
2005 if (byte_field & 0x40) /* 64-bytes cqe enabled */
2006 param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
2008 /* CX3 is capable of extending CQEs/EQEs to strides larger than 64B */
2009 MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
2010 if (byte_field) {
2011 param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
2012 param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
2013 param->cqe_size = 1 << ((byte_field &
2014 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
2015 param->eqe_size = 1 << (((byte_field &
2016 MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
2019 /* TPT attributes */
2021 MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
2022 MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
2023 param->mw_enabled = byte_field >> 7;
2024 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
2025 param->log_mpt_sz = byte_field & 0x3f;
2026 MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
2027 MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
2029 /* UAR attributes */
2031 MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
2032 MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
2033 param->log_uar_sz = byte_field & 0xf;
2035 /* phv_check enable */
2036 MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
2037 if (byte_field & 0x2)
2038 param->phv_check_en = 1;
2039 out:
2040 mlx4_free_cmd_mailbox(dev, mailbox);
2041 return err;
2045 static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
2047 struct mlx4_cmd_mailbox *mailbox;
2048 __be32 *outbox;
2049 int err;
2051 mailbox = mlx4_alloc_cmd_mailbox(dev);
2052 if (IS_ERR(mailbox)) {
2053 mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
2054 return PTR_ERR(mailbox);
2056 outbox = mailbox->buf;
2058 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2059 MLX4_CMD_QUERY_HCA,
2060 MLX4_CMD_TIME_CLASS_B,
2061 !mlx4_is_slave(dev));
2062 if (err) {
2063 mlx4_warn(dev, "hca_core_clock update failed\n");
2064 goto out;
2067 MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);
2069 out:
2070 mlx4_free_cmd_mailbox(dev, mailbox);
2072 return err;
2075 /* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
2076 * and real QP0 are active, so that the paravirtualized QP0 is ready
2077 * to operate */
2078 static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
2080 struct mlx4_priv *priv = mlx4_priv(dev);
2081 /* irrelevant if not infiniband */
2082 if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
2083 priv->mfunc.master.qp0_state[port].qp0_active)
2084 return 1;
2085 return 0;
2088 int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
2089 struct mlx4_vhcr *vhcr,
2090 struct mlx4_cmd_mailbox *inbox,
2091 struct mlx4_cmd_mailbox *outbox,
2092 struct mlx4_cmd_info *cmd)
2094 struct mlx4_priv *priv = mlx4_priv(dev);
2095 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2096 int err;
2098 if (port < 0)
2099 return -EINVAL;
2101 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
2102 return 0;
2104 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2105 /* Enable port only if it was previously disabled */
2106 if (!priv->mfunc.master.init_port_ref[port]) {
2107 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2108 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2109 if (err)
2110 return err;
2112 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2114 if (slave == mlx4_master_func_num(dev)) {
2115 if (check_qp0_state(dev, slave, port) &&
2116 !priv->mfunc.master.qp0_state[port].port_active) {
2117 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2118 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2119 if (err)
2120 return err;
2121 priv->mfunc.master.qp0_state[port].port_active = 1;
2122 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2124 } else
2125 priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
2127 ++priv->mfunc.master.init_port_ref[port];
2128 return 0;
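/* Note: init_port_ref[port] counts how many functions currently hold the
 * port open. For non-IB ports the physical INIT_PORT above is issued only
 * on the 0 -> 1 transition, and mlx4_CLOSE_PORT_wrapper() below issues
 * CLOSE_PORT only when the last reference is dropped.
 */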
2131 int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
2133 struct mlx4_cmd_mailbox *mailbox;
2134 u32 *inbox;
2135 int err;
2136 u32 flags;
2137 u16 field;
2139 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
2140 #define INIT_PORT_IN_SIZE 256
2141 #define INIT_PORT_FLAGS_OFFSET 0x00
2142 #define INIT_PORT_FLAG_SIG (1 << 18)
2143 #define INIT_PORT_FLAG_NG (1 << 17)
2144 #define INIT_PORT_FLAG_G0 (1 << 16)
2145 #define INIT_PORT_VL_SHIFT 4
2146 #define INIT_PORT_PORT_WIDTH_SHIFT 8
2147 #define INIT_PORT_MTU_OFFSET 0x04
2148 #define INIT_PORT_MAX_GID_OFFSET 0x06
2149 #define INIT_PORT_MAX_PKEY_OFFSET 0x0a
2150 #define INIT_PORT_GUID0_OFFSET 0x10
2151 #define INIT_PORT_NODE_GUID_OFFSET 0x18
2152 #define INIT_PORT_SI_GUID_OFFSET 0x20
2154 mailbox = mlx4_alloc_cmd_mailbox(dev);
2155 if (IS_ERR(mailbox))
2156 return PTR_ERR(mailbox);
2157 inbox = mailbox->buf;
2159 flags = 0;
2160 flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
2161 flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
2162 MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
2164 field = 128 << dev->caps.ib_mtu_cap[port];
2165 MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
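/* The IB MTU capability is a power-of-two code, so e.g. ib_mtu_cap = 5
 * (the 4K MTU code) yields 128 << 5 = 4096 bytes here.
 */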
2166 field = dev->caps.gid_table_len[port];
2167 MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
2168 field = dev->caps.pkey_table_len[port];
2169 MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
2171 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
2172 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2174 mlx4_free_cmd_mailbox(dev, mailbox);
2175 } else
2176 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
2177 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2179 if (!err)
2180 mlx4_hca_core_clock_update(dev);
2182 return err;
2184 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
2186 int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
2187 struct mlx4_vhcr *vhcr,
2188 struct mlx4_cmd_mailbox *inbox,
2189 struct mlx4_cmd_mailbox *outbox,
2190 struct mlx4_cmd_info *cmd)
2192 struct mlx4_priv *priv = mlx4_priv(dev);
2193 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
2194 int err;
2196 if (port < 0)
2197 return -EINVAL;
2199 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
2200 (1 << port)))
2201 return 0;
2203 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
2204 if (priv->mfunc.master.init_port_ref[port] == 1) {
2205 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2206 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2207 if (err)
2208 return err;
2210 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2212 /* infiniband port */
2213 if (slave == mlx4_master_func_num(dev)) {
2214 if (!priv->mfunc.master.qp0_state[port].qp0_active &&
2215 priv->mfunc.master.qp0_state[port].port_active) {
2216 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2217 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2218 if (err)
2219 return err;
2220 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2221 priv->mfunc.master.qp0_state[port].port_active = 0;
2223 } else
2224 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
2226 --priv->mfunc.master.init_port_ref[port];
2228 return 0;
2230 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
2232 return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
2233 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2235 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
2237 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
2239 return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
2240 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
2243 struct mlx4_config_dev {
2244 __be32 update_flags;
2245 __be32 rsvd1[3];
2246 __be16 vxlan_udp_dport;
2247 __be16 rsvd2;
2248 __be32 rsvd3;
2249 __be32 roce_flags;
2250 __be32 rsvd4[25];
2251 __be16 rsvd5;
2252 u8 rsvd6;
2253 u8 rx_checksum_val;
2256 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
2257 #define MLX4_DISABLE_RX_PORT BIT(18)
2259 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2261 int err;
2262 struct mlx4_cmd_mailbox *mailbox;
2264 mailbox = mlx4_alloc_cmd_mailbox(dev);
2265 if (IS_ERR(mailbox))
2266 return PTR_ERR(mailbox);
2268 memcpy(mailbox->buf, config_dev, sizeof(*config_dev));
2270 err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
2271 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2273 mlx4_free_cmd_mailbox(dev, mailbox);
2274 return err;
2277 static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
2279 int err;
2280 struct mlx4_cmd_mailbox *mailbox;
2282 mailbox = mlx4_alloc_cmd_mailbox(dev);
2283 if (IS_ERR(mailbox))
2284 return PTR_ERR(mailbox);
2286 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
2287 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2289 if (!err)
2290 memcpy(config_dev, mailbox->buf, sizeof(*config_dev));
2292 mlx4_free_cmd_mailbox(dev, mailbox);
2293 return err;
2296 /* Conversion between the HW values and the actual functionality.
2297 * The HW value is the index into this array,
2298 * and the functionality is determined by the flags at that index.
2299 */
2300 static const u8 config_dev_csum_flags[] = {
2301 [0] = 0,
2302 [1] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
2303 [2] = MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP |
2304 MLX4_RX_CSUM_MODE_L4,
2305 [3] = MLX4_RX_CSUM_MODE_L4 |
2306 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP |
2307 MLX4_RX_CSUM_MODE_MULTI_VLAN
2310 int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
2311 struct mlx4_config_dev_params *params)
2313 struct mlx4_config_dev config_dev = {0};
2314 int err;
2315 u8 csum_mask;
2317 #define CONFIG_DEV_RX_CSUM_MODE_MASK 0x7
2318 #define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET 0
2319 #define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET 4
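/* Example decode: rx_checksum_val = 0x32 gives port 1 mode
 * (0x32 >> 0) & 7 = 2 (NON_TCP_UDP | L4 per the table above) and port 2
 * mode (0x32 >> 4) & 7 = 3 (L4 | IP_OK_IP_NON_TCP_UDP | MULTI_VLAN).
 */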
2321 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
2324 err = mlx4_CONFIG_DEV_get(dev, &config_dev);
2325 if (err)
2326 return err;
2328 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
2329 CONFIG_DEV_RX_CSUM_MODE_MASK;
2331 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
2332 return -EINVAL;
2333 params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];
2335 csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
2336 CONFIG_DEV_RX_CSUM_MODE_MASK;
2338 if (csum_mask >= sizeof(config_dev_csum_flags)/sizeof(config_dev_csum_flags[0]))
2339 return -EINVAL;
2340 params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];
2342 params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);
2344 return 0;
2346 EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);
2348 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
2350 struct mlx4_config_dev config_dev;
2352 memset(&config_dev, 0, sizeof(config_dev));
2353 config_dev.update_flags = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
2354 config_dev.vxlan_udp_dport = udp_port;
2356 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2358 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
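/* A typical caller passes the port in network byte order, e.g.
 * mlx4_config_vxlan_port(dev, htons(4789)) for the IANA-assigned VXLAN
 * port; the __be16 value is placed in the CONFIG_DEV mailbox unchanged.
 */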
2360 #define CONFIG_DISABLE_RX_PORT BIT(15)
2361 int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
2363 struct mlx4_config_dev config_dev;
2365 memset(&config_dev, 0, sizeof(config_dev));
2366 config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
2367 if (dis)
2368 config_dev.roce_flags =
2369 cpu_to_be32(CONFIG_DISABLE_RX_PORT);
2371 return mlx4_CONFIG_DEV_set(dev, &config_dev);
2374 int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
2376 struct mlx4_cmd_mailbox *mailbox;
2377 struct {
2378 __be32 v_port1;
2379 __be32 v_port2;
2380 } *v2p;
2381 int err;
2383 mailbox = mlx4_alloc_cmd_mailbox(dev);
2384 if (IS_ERR(mailbox))
2385 return -ENOMEM;
2387 v2p = mailbox->buf;
2388 v2p->v_port1 = cpu_to_be32(port1);
2389 v2p->v_port2 = cpu_to_be32(port2);
2391 err = mlx4_cmd(dev, mailbox->dma, 0,
2392 MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
2393 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2395 mlx4_free_cmd_mailbox(dev, mailbox);
2397 return err;
2400 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
2402 int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
2403 MLX4_CMD_SET_ICM_SIZE,
2404 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2406 if (ret)
2407 return ret;
2409 * Round up number of system pages needed in case
2410 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
2412 *aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
2413 (PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);
2415 return 0;
2418 int mlx4_NOP(struct mlx4_dev *dev)
2420 /* Input modifier of 0x1f means "finish as soon as possible." */
2421 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
2422 MLX4_CMD_NATIVE);
2425 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
2427 u8 port;
2428 u32 *outbox;
2429 struct mlx4_cmd_mailbox *mailbox;
2430 u32 in_mod;
2431 u32 guid_hi, guid_lo;
2432 int err, ret = 0;
2433 #define MOD_STAT_CFG_PORT_OFFSET 8
2434 #define MOD_STAT_CFG_GUID_H 0x14
2435 #define MOD_STAT_CFG_GUID_L 0x1c
2437 mailbox = mlx4_alloc_cmd_mailbox(dev);
2438 if (IS_ERR(mailbox))
2439 return PTR_ERR(mailbox);
2440 outbox = mailbox->buf;
2442 for (port = 1; port <= dev->caps.num_ports; port++) {
2443 in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
2444 err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
2445 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2446 MLX4_CMD_NATIVE);
2447 if (err) {
2448 mlx4_err(dev, "Failed to get port %d uplink GUID\n",
2449 port);
2450 ret = err;
2451 } else {
2452 MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
2453 MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
2454 dev->caps.phys_port_id[port] = (u64)guid_lo |
2455 (u64)guid_hi << 32;
2458 mlx4_free_cmd_mailbox(dev, mailbox);
2459 return ret;
2462 #define MLX4_WOL_SETUP_MODE (5 << 28)
2463 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
2465 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2467 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
2468 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
2471 EXPORT_SYMBOL_GPL(mlx4_wol_read);
2473 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
2475 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
2477 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
2478 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
2480 EXPORT_SYMBOL_GPL(mlx4_wol_write);
2487 void mlx4_opreq_action(struct work_struct *work)
2489 struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
2490 opreq_task);
2491 struct mlx4_dev *dev = &priv->dev;
2492 int num_tasks = atomic_read(&priv->opreq_count);
2493 struct mlx4_cmd_mailbox *mailbox;
2494 struct mlx4_mgm *mgm;
2495 u32 *outbox;
2496 u32 modifier;
2497 u16 token;
2498 u16 type;
2499 int err;
2500 u32 num_qps;
2501 struct mlx4_qp qp;
2502 int i;
2503 int rem_mcg;
2504 int prot;
2506 #define GET_OP_REQ_MODIFIER_OFFSET 0x08
2507 #define GET_OP_REQ_TOKEN_OFFSET 0x14
2508 #define GET_OP_REQ_TYPE_OFFSET 0x1a
2509 #define GET_OP_REQ_DATA_OFFSET 0x20
2511 mailbox = mlx4_alloc_cmd_mailbox(dev);
2512 if (IS_ERR(mailbox)) {
2513 mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
2514 return;
2516 outbox = mailbox->buf;
2518 while (num_tasks) {
2519 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
2520 MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2521 MLX4_CMD_NATIVE);
2522 if (err) {
2523 mlx4_err(dev, "Failed to retrieve required operation: %d\n",
2524 err);
2525 goto out;
2527 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
2528 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
2529 MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
2530 type &= 0xfff;
2532 switch (type) {
2533 case ADD_TO_MCG:
2534 if (dev->caps.steering_mode ==
2535 MLX4_STEERING_MODE_DEVICE_MANAGED) {
2536 mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
2540 mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
2541 GET_OP_REQ_DATA_OFFSET);
2542 num_qps = be32_to_cpu(mgm->members_count) &
2543 0xffffff;
2544 rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
2545 prot = ((u8 *)(&mgm->members_count))[0] >> 6;
2547 for (i = 0; i < num_qps; i++) {
2548 qp.qpn = be32_to_cpu(mgm->qp[i]);
2549 if (rem_mcg)
2550 err = mlx4_multicast_detach(dev, &qp,
2551 mgm->gid,
2552 prot, 0);
2553 else
2554 err = mlx4_multicast_attach(dev, &qp,
2564 mlx4_warn(dev, "Bad type for required operation\n");
2568 err = mlx4_cmd(dev, 0, ((u32) err |
2569 (__force u32)cpu_to_be32(token) << 16),
2570 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
2571 MLX4_CMD_NATIVE);
2572 if (err) {
2573 mlx4_err(dev, "Failed to acknowledge required request: %d\n",
2574 err);
2575 goto out;
2577 memset(outbox, 0, 0xffc);
2578 num_tasks = atomic_dec_return(&priv->opreq_count);
2581 out:
2582 mlx4_free_cmd_mailbox(dev, mailbox);
2585 static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
2586 struct mlx4_cmd_mailbox *mailbox)
2588 #define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET 0x10
2589 #define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET 0x20
2590 #define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET 0x40
2591 #define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET 0x70
2593 u32 set_attr_mask, getresp_attr_mask;
2594 u32 trap_attr_mask, traprepress_attr_mask;
2596 MLX4_GET(set_attr_mask, mailbox->buf,
2597 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
2598 mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
2599 set_attr_mask);
2601 MLX4_GET(getresp_attr_mask, mailbox->buf,
2602 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
2603 mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
2604 getresp_attr_mask);
2606 MLX4_GET(trap_attr_mask, mailbox->buf,
2607 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
2608 mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
2609 trap_attr_mask);
2611 MLX4_GET(traprepress_attr_mask, mailbox->buf,
2612 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
2613 mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
2614 traprepress_attr_mask);
2616 if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
2617 traprepress_attr_mask)
2618 return 1;
2620 return 0;
2623 int mlx4_config_mad_demux(struct mlx4_dev *dev)
2625 struct mlx4_cmd_mailbox *mailbox;
2626 int secure_host_active;
2627 int err;
2629 /* Check if mad_demux is supported */
2630 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
2631 return 0;
2633 mailbox = mlx4_alloc_cmd_mailbox(dev);
2634 if (IS_ERR(mailbox)) {
2635 mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
2636 return -ENOMEM;
2639 /* Query mad_demux to find out which MADs are handled by the internal SMA */
2640 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
2641 MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
2642 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2643 if (err) {
2644 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
2645 err);
2646 goto out;
2649 secure_host_active = mlx4_check_smp_firewall_active(dev, mailbox);
2651 /* Config mad_demux to handle all MADs returned by the query above */
2652 err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
2653 MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
2654 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
2655 if (err) {
2656 mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
2657 goto out;
2660 if (secure_host_active)
2661 mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
2662 out:
2663 mlx4_free_cmd_mailbox(dev, mailbox);
2664 return err;
2667 /* Access Reg commands */
2668 enum mlx4_access_reg_masks {
2669 MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
2670 MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
2671 MLX4_ACCESS_REG_LEN_MASK = 0x7ff
2674 struct mlx4_access_reg {
2675 __be16 constant1;
2676 u8 status;
2677 u8 resrvd1;
2678 __be16 reg_id;
2679 u8 method;
2680 u8 constant2;
2681 __be32 resrvd2[2];
2682 __be16 len_const;
2683 __be16 resrvd3;
2684 #define MLX4_ACCESS_REG_HEADER_SIZE (20)
2685 u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
2686 } __attribute__((__packed__));
2688 /**
2689 * mlx4_ACCESS_REG - Generic access reg command.
2690 * @dev: mlx4_dev.
2691 * @reg_id: register ID to access.
2692 * @method: Access method Read/Write.
2693 * @reg_len: register length to Read/Write in bytes.
2694 * @reg_data: reg_data pointer to Read/Write From/To.
2696 * Access ConnectX registers FW command.
2697 * Returns 0 on success and copies outbox mlx4_access_reg data
2698 * field into reg_data or a negative error code.
2699 */
2700 static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
2701 enum mlx4_access_reg_method method,
2702 u16 reg_len, void *reg_data)
2704 struct mlx4_cmd_mailbox *inbox, *outbox;
2705 struct mlx4_access_reg *inbuf, *outbuf;
2706 int err;
2708 inbox = mlx4_alloc_cmd_mailbox(dev);
2709 if (IS_ERR(inbox))
2710 return PTR_ERR(inbox);
2712 outbox = mlx4_alloc_cmd_mailbox(dev);
2713 if (IS_ERR(outbox)) {
2714 mlx4_free_cmd_mailbox(dev, inbox);
2715 return PTR_ERR(outbox);
2718 inbuf = inbox->buf;
2719 outbuf = outbox->buf;
2721 inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
2722 inbuf->constant2 = 0x1;
2723 inbuf->reg_id = cpu_to_be16(reg_id);
2724 inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;
2726 reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
2727 inbuf->len_const =
2728 cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
2729 ((0x3) << 12));
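/* For example, a 60-byte register yields 60/4 + 1 = 16 dwords in the
 * length field, OR'd with the constant 0x3 in bits 12-13; the extra
 * dword appears to account for the register header.
 */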
2731 memcpy(inbuf->reg_data, reg_data, reg_len);
2732 err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
2733 MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2734 MLX4_CMD_WRAPPED);
2735 if (err)
2736 goto out;
2738 if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
2739 err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
2741 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
2746 memcpy(reg_data, outbuf->reg_data, reg_len);
2747 out:
2748 mlx4_free_cmd_mailbox(dev, inbox);
2749 mlx4_free_cmd_mailbox(dev, outbox);
2750 return err;
2753 /* ConnectX registers IDs */
2754 enum {
2755 MLX4_REG_ID_PTYS = 0x5004,
2756 };
2758 /**
2759 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
2760 * register
2761 * @dev: mlx4_dev.
2762 * @method: Access method Read/Write.
2763 * @ptys_reg: PTYS register data pointer.
2765 * Access ConnectX PTYS register, to Read/Write Port Type/Speed
2766 * configurations.
2767 * Returns 0 on success or a negative error code.
2768 */
2769 int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
2770 enum mlx4_access_reg_method method,
2771 struct mlx4_ptys_reg *ptys_reg)
2773 return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
2774 method, sizeof(*ptys_reg), ptys_reg);
2776 EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
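/* A minimal usage sketch (not part of the original file): query the PTYS
 * register for one Ethernet port through the helper above. It assumes
 * MLX4_ACCESS_REG_QUERY and the MLX4_PTYS_EN proto_mask bit from
 * <linux/mlx4/device.h>, and reduces error handling to the bare minimum.
 */
static int __maybe_unused example_query_ptys(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_ptys_reg ptys_reg;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = port;		/* physical port to query */
	ptys_reg.proto_mask = MLX4_PTYS_EN;	/* Ethernet protocol layers */
	return mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
}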
2778 int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
2779 struct mlx4_vhcr *vhcr,
2780 struct mlx4_cmd_mailbox *inbox,
2781 struct mlx4_cmd_mailbox *outbox,
2782 struct mlx4_cmd_info *cmd)
2784 struct mlx4_access_reg *inbuf = inbox->buf;
2785 u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
2786 u16 reg_id = be16_to_cpu(inbuf->reg_id);
2788 if (slave != mlx4_master_func_num(dev) &&
2789 method == MLX4_ACCESS_REG_WRITE)
2790 return -EPERM;
2792 if (reg_id == MLX4_REG_ID_PTYS) {
2793 struct mlx4_ptys_reg *ptys_reg =
2794 (struct mlx4_ptys_reg *)inbuf->reg_data;
2796 ptys_reg->local_port =
2797 mlx4_slave_convert_port(dev, slave,
2798 ptys_reg->local_port);
2801 return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
2802 0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
2803 MLX4_CMD_NATIVE);
2806 static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
2808 #define SET_PORT_GEN_PHV_VALID 0x10
2809 #define SET_PORT_GEN_PHV_EN 0x80
2811 struct mlx4_cmd_mailbox *mailbox;
2812 struct mlx4_set_port_general_context *context;
2813 u32 in_mod;
2814 int err;
2816 mailbox = mlx4_alloc_cmd_mailbox(dev);
2817 if (IS_ERR(mailbox))
2818 return PTR_ERR(mailbox);
2819 context = mailbox->buf;
2821 context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
2822 if (phv_bit)
2823 context->phv_en |= SET_PORT_GEN_PHV_EN;
2825 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
2826 err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
2827 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
2828 MLX4_CMD_WRAPPED);
2830 mlx4_free_cmd_mailbox(dev, mailbox);
2831 return err;
2834 int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
2836 int err;
2837 struct mlx4_func_cap func_cap;
2839 memset(&func_cap, 0, sizeof(func_cap));
2840 err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
2841 if (!err)
2842 *phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
2843 return err;
2845 EXPORT_SYMBOL(get_phv_bit);
2847 int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
2849 int ret;
2851 if (mlx4_is_slave(dev))
2852 return -EPERM;
2854 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
2855 !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
2856 ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
2857 if (!ret)
2858 dev->caps.phv_bit[port] = new_val;
2859 return ret;
2862 return -EOPNOTSUPP;
2864 EXPORT_SYMBOL(set_phv_bit);
2866 void mlx4_replace_zero_macs(struct mlx4_dev *dev)
2868 int i;
2869 u8 mac_addr[ETH_ALEN];
2871 dev->port_random_macs = 0;
2872 for (i = 1; i <= dev->caps.num_ports; ++i)
2873 if (!dev->caps.def_mac[i] &&
2874 dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
2875 eth_random_addr(mac_addr);
2876 dev->port_random_macs |= 1 << i;
2877 dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
2880 EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);