/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_OP_REQUIRED)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

static u64 get_async_ev_mask(struct mlx4_dev *dev)
{
	u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT);

	return async_ev_mask;
}

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
				u8 eqe_size)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;

	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
	 * strides of 64B, 128B and 256B.
	 * When a 64B EQE is used, the first (lower-addressed) 32 bytes
	 * of the 64-byte EQE are reserved and the next 32 bytes contain
	 * the legacy EQE information.
	 * In all other cases, the first 32B contain the legacy EQE info.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}

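/* Worked example of the offset math above (illustrative only, not used by
 * the driver): with 64-byte EQEs (eqe_factor = 1, eqe_size = 64) and
 * nent = 512, entry 515 wraps to slot 3:
 *
 *	offset = (515 & 511) * 64 = 192
 *	valid EQE data = buf + (192 + MLX4_EQ_ENTRY_SIZE) % PAGE_SIZE = buf + 224
 *
 * i.e. the legacy 32-byte EQE sits in the upper half of the 64-byte entry.
 */
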
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

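/* Note on the ownership test above: on even passes through the ring
 * (eq->cons_index & eq->nent == 0) an entry is software-owned while its
 * owner bit is clear; after every wrap the expected polarity flips.
 * A stale entry left over from the previous lap therefore fails the XOR
 * test and is returned as NULL instead of being consumed twice.
 */
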
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i, phys_port, slave_port;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE &&
		    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN &&
		    mlx4_is_bonded(dev)) {
			struct mlx4_port_cap port_cap;

			if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
				goto consume;

			if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
				goto consume;
		}
		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i <= dev->persist->num_vfs; i++) {
				phys_port = 0;
				if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
				    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
					phys_port  = eqe->event.port_mgmt_change.port;
					slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
					if (slave_port < 0) /* VF doesn't have this port */
						continue;
					eqe->event.port_mgmt_change.port = slave_port;
				}
				if (mlx4_GEN_EQE(dev, i, eqe))
					mlx4_warn(dev, "Failed to generate event for slave %d\n",
						  i);
				if (phys_port)
					eqe->event.port_mgmt_change.port = phys_port;
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
consume:
		++slave_eq->cons;
	}
}

static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe;
	unsigned long flags;

	spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	dma_wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
	spin_unlock_irqrestore(&slave_eq->event_lock, flags);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return;

	slave_event(dev, slave, eqe);
}

#if defined(CONFIG_SMP)
static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
{
	int hint_err;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_eq *eq = &priv->eq_table.eq[vec];

	if (!cpumask_available(eq->affinity_mask) ||
	    cpumask_empty(eq->affinity_mask))
		return;

	hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
	if (hint_err)
		mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif

int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];

	if (!s_slave->active)
		return 0;

	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_pkey_eqe);

int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_eqe eqe;

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
	eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port);

	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);

int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
				   u8 port_subtype_change)
{
	struct mlx4_eqe eqe;
	u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port);

	/* don't send if we don't have that slave */
	if (dev->persist->num_vfs < slave)
		return 0;
	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
	eqe.subtype = port_subtype_change;
	eqe.event.port_change.port = cpu_to_be32(slave_port << 28);

	mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
		 port_subtype_change, slave, port);
	return mlx4_GEN_EQE(dev, slave, &eqe);
}
EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);

enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return SLAVE_PORT_DOWN;
	}
	return s_state[slave].port_state[port];
}
EXPORT_SYMBOL(mlx4_get_slave_port_state);

static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
				     enum slave_port_state state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
	}
	s_state[slave].port_state[port] = state;

	return 0;
}

static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->persist->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}

/**************************************************************************
	The function gets the new event for that port as input and,
	according to the previous state, changes the slave's port state.
	The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);

	*gen_event = SLAVE_PORT_GEN_EVENT_NONE;

	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
	}

	ctx = &priv->mfunc.master.slave_state[slave];
	spin_lock_irqsave(&ctx->lock, flags);

	switch (cur_state) {
	case SLAVE_PORT_DOWN:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
		break;
	case SLAVE_PENDING_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
		else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_UP;
		}
		break;
	case SLAVE_PORT_UP:
		if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PORT_DOWN);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		} else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
				event) {
			mlx4_set_slave_port_state(dev, slave, port,
						  SLAVE_PENDING_UP);
			*gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
		}
		break;
	default:
		pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
		       __func__, slave, port);
		goto out;
	}
	ret = mlx4_get_slave_port_state(dev, slave, port);

out:
	spin_unlock_irqrestore(&ctx->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_and_calc_slave_port_state);

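/* Summary of the transitions implemented above (informational only):
 *
 *	current state     DEV_PORT_UP   DEV_PORT_DOWN    IB_GID_VALID   IB_GID_INVALID
 *	SLAVE_PORT_DOWN   PENDING_UP    -                -              -
 *	SLAVE_PENDING_UP  -             PORT_DOWN        PORT_UP (*)    -
 *	SLAVE_PORT_UP     -             PORT_DOWN (*)    -              PENDING_UP (*)
 *
 * (*) marks transitions that also report a gen_event (UP or DOWN) to the
 * caller; all other combinations leave the state unchanged.
 */
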
int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
{
	struct mlx4_eqe eqe;

	memset(&eqe, 0, sizeof(eqe));

	eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
	eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
	eqe.event.port_mgmt_change.port = port;
	eqe.event.port_mgmt_change.params.port_info.changed_attr =
		cpu_to_be32((u32) attr);

	slave_event(dev, ALL_SLAVES, &eqe);
	return 0;
}
EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;
	unsigned long flags;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);
			/* In case of 'Reset flow' FLR can be generated for
			 * a slave before mlx4_load_one is done.
			 * make sure interface is up before trying to delete
			 * slave resources which weren't allocated yet.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_UP)
				mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;
	int eqe_size = dev->caps.eqe_size;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
				 __func__, be32_to_cpu(eqe->event.srq.srqn),
				 eq->eqn);
			fallthrough;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				if (eqe->type ==
				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
						  __func__, slave,
						  be32_to_cpu(eqe->event.srq.srqn),
						  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					if (eqe->type ==
					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
							  __func__, eqe->type,
							  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(
					dev, MLX4_DEV_EVENT_PORT_DOWN, &port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->persist->num_vfs + 1;
				     i++) {
					int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

					if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state, then do not send port_down event */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    &port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0;
					     i < dev->persist->num_vfs + 1;
					     i++) {
						int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

						if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context
			 * working in deferred task
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof(eqe->event.comm_channel_arm.bit_vec));
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
					    &flr_slave);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;

		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(
				dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe);
			break;

		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
			switch (eqe->subtype) {
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
				mlx4_warn(dev, "Bad cable detected on port %u\n",
					  eqe->event.bad_cable.port);
				break;
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
				mlx4_warn(dev, "Unsupported cable detected\n");
				break;
			default:
				mlx4_dbg(dev,
					 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
					 eqe->type, eqe->subtype, eq->eqn,
					 eq->cons_index, eqe->owner, eq->nent,
					 !!(eqe->owner & 0x80) ^
					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
				break;
			}
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

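/* Informational sketch of the spare-entry arithmetic above: EQs are sized
 * with MLX4_NUM_SPARE_EQE (0x80) extra entries (see mlx4_init_eq_table(),
 * e.g. MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE for the async EQ), and the
 * loop writes the consumer index back after every 0x80 processed EQEs, so
 * the hardware never sees more than the spare headroom outstanding.
 */
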
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x3FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num)
{
	return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(
				pci_resource_start(dev->persist->pdev, 2) +
				((eq->eqn / 4) << (dev->uar_page_shift)),
				(1 << (dev->uar_page_shift)));
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

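/* A worked example of the doorbell mapping above (illustrative values):
 * with reserved_eqs = 16 and eqn = 18, index = 18/4 - 16/4 = 0, so the
 * UAR page covering EQs 16..19 is mapped once and reused; the doorbell
 * for EQ 18 then lives at offset 0x800 + 8 * (18 % 4) = 0x810 within
 * that page -- four EQ doorbells per UAR, as mlx4_num_eq_uar() assumes.
 */
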
static void mlx4_unmap_uar(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
			iounmap(priv->eq_table.uar_map[i]);
			priv->eq_table.uar_map[i] = NULL;
		}
}

static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B.
	 */
	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

	eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
				      GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
							  pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
	spin_lock_init(&eq->tasklet_ctx.lock);
	tasklet_setup(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb);

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);

err_out:
	kfree(dma_list);
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B
	 */
	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

	err = mlx4_HW2SW_EQ(dev, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	synchronize_irq(eq->irq);
	tasklet_disable(&eq->tasklet_ctx.task);

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->persist->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_cpumask_var(eq_table->eq[i].affinity_mask);
			irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
				 priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof(*priv->eq_table.eq), GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof(*priv->eq_table.uar_map),
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap,
			       roundup_pow_of_two(dev->caps.num_eqs),
			       dev->caps.num_eqs - 1,
			       dev->caps.reserved_eqs,
			       roundup_pow_of_two(dev->caps.num_eqs) -
			       dev->caps.num_eqs);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc_array(MLX4_IRQNAME_SIZE,
			      (dev->caps.num_comp_vectors + 1),
			      GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_clr_int;
	}

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
		if (i == MLX4_EQ_ASYNC) {
			err = mlx4_create_eq(dev,
					     MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
					     0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
		} else {
			struct mlx4_eq	*eq = &priv->eq_table.eq[i];
#ifdef CONFIG_RFS_ACCEL
			int port = find_first_bit(eq->actv_ports.ports,
						  dev->caps.num_ports) + 1;

			if (port <= dev->caps.num_ports) {
				struct mlx4_port_info *info =
					&mlx4_priv(dev)->port[port];

				if (!info->rmap) {
					info->rmap = alloc_irq_cpu_rmap(
						mlx4_get_eqs_per_port(dev, port));
					if (!info->rmap) {
						mlx4_warn(dev, "Failed to allocate cpu rmap\n");
						err = -ENOMEM;
						goto err_out_unmap;
					}
				}

				err = irq_cpu_rmap_add(
					info->rmap, eq->irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = mlx4_create_eq(dev, dev->quotas.cq +
						  MLX4_NUM_SPARE_EQE,
					     (dev->flags & MLX4_FLAG_MSI_X) ?
					     i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
					     eq);
		}
		if (err)
			goto err_out_unmap;
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		snprintf(priv->eq_table.irq_names +
			 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE,
			 "mlx4-async@pci:%s",
			 pci_name(dev->persist->pdev));
		eq_name = priv->eq_table.irq_names +
			  MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;

		err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
				  mlx4_msi_x_interrupt, 0, eq_name,
				  priv->eq_table.eq + MLX4_EQ_ASYNC);
		if (err)
			goto err_out_unmap;

		priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->persist->pdev));
		err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_unmap;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	/* arm ASYNC eq */
	eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);

	return 0;

err_out_unmap:
	while (i > 0)
		mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

err_out_clr_int:
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

err_out_bitmap:
	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	mlx4_unmap_uar(dev);
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts
 * on the vector allocated for asynchronous events
 */
int mlx4_test_async(struct mlx4_dev *dev)
{
	return mlx4_NOP(dev);
}
EXPORT_SYMBOL(mlx4_test_async);

/* A test that verifies that we can accept interrupts
 * on the given irq vector of the tested port.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* Temporary use polling for command completions */
	mlx4_cmd_use_polling(dev);

	/* Map the new eq to handle all asynchronous events */
	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
	if (err) {
		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
		goto out;
	}

	/* Go back to using events */
	mlx4_cmd_use_events(dev);
	err = mlx4_NOP(dev);

	/* Return to default */
	mlx4_cmd_use_polling(dev);
out:
	mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	mlx4_cmd_use_events(dev);

	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupt);

bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
	    (vector == MLX4_EQ_ASYNC))
		return false;

	return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
}
EXPORT_SYMBOL(mlx4_is_eq_vector_valid);

u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned int i;
	unsigned int sum = 0;

	for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
		sum += !!test_bit(port - 1,
				  priv->eq_table.eq[i].actv_ports.ports);

	return sum;
}
EXPORT_SYMBOL(mlx4_get_eqs_per_port);

int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	vector = MLX4_CQ_TO_EQ_VECTOR(vector);
	if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
		return -EINVAL;

	return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
				dev->caps.num_ports) > 1);
}
EXPORT_SYMBOL(mlx4_is_eq_shared);

struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
	return mlx4_priv(dev)->port[port].rmap;
}
EXPORT_SYMBOL(mlx4_get_cpu_rmap);

int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err = 0, i = 0;
	u32 min_ref_count_val = (u32)-1;
	int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
	int *prequested_vector = NULL;

	mutex_lock(&priv->msix_ctl.pool_lock);
	if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
	    (requested_vector >= 0) &&
	    (requested_vector != MLX4_EQ_ASYNC)) {
		if (test_bit(port - 1,
			     priv->eq_table.eq[requested_vector].actv_ports.ports)) {
			prequested_vector = &requested_vector;
		} else {
			struct mlx4_eq *eq;

			for (i = 1; i < port;
			     requested_vector += mlx4_get_eqs_per_port(dev, i++))
				;

			eq = &priv->eq_table.eq[requested_vector];
			if (requested_vector < dev->caps.num_comp_vectors + 1 &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				prequested_vector = &requested_vector;
			}
		}
	}

	if (!prequested_vector) {
		requested_vector = -1;
		for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
		     i++) {
			struct mlx4_eq *eq = &priv->eq_table.eq[i];

			if (min_ref_count_val > eq->ref_count &&
			    test_bit(port - 1, eq->actv_ports.ports)) {
				min_ref_count_val = eq->ref_count;
				requested_vector = i;
			}
		}

		if (requested_vector < 0) {
			err = -ENOSPC;
			goto err_unlock;
		}

		prequested_vector = &requested_vector;
	}

	if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
	    dev->flags & MLX4_FLAG_MSI_X) {
		set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
		snprintf(priv->eq_table.irq_names +
			 *prequested_vector * MLX4_IRQNAME_SIZE,
			 MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
			 *prequested_vector, dev_name(&dev->persist->pdev->dev));

		err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
				  mlx4_msi_x_interrupt, 0,
				  &priv->eq_table.irq_names[*prequested_vector << 5],
				  priv->eq_table.eq + *prequested_vector);

		if (err) {
			clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
			*prequested_vector = -1;
		} else {
#if defined(CONFIG_SMP)
			mlx4_set_eq_affinity_hint(priv, *prequested_vector);
#endif
			eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
			priv->eq_table.eq[*prequested_vector].have_irq = 1;
		}
	}

	if (!err && *prequested_vector >= 0)
		priv->eq_table.eq[*prequested_vector].ref_count++;

err_unlock:
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (!err && *prequested_vector >= 0)
		*vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
	else
		*vector = 0;

	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);

	mutex_lock(&priv->msix_ctl.pool_lock);
	priv->eq_table.eq[eq_vec].ref_count--;

	/* once we allocated EQ, we don't release it because it might be bound
	 * to CPU_RMAP.
	 */
	mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);