GNU Linux-libre 4.9.328-gnu1
drivers/net/ethernet/mellanox/mlx4/qp.c
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/gfp.h>
37 #include <linux/export.h>
38
39 #include <linux/mlx4/cmd.h>
40 #include <linux/mlx4/qp.h>
41
42 #include "mlx4.h"
43 #include "icm.h"
44
45 /* QP to support BF should have bits 6,7 cleared */
46 #define MLX4_BF_QP_SKIP_MASK    0xc0
47 #define MLX4_MAX_BF_QP_RANGE    0x40
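/* For example, only QP numbers whose low byte falls in 0x00-0x3f have both
 * bits 6 and 7 clear, so a BlueFlame-capable range can span at most
 * MLX4_MAX_BF_QP_RANGE (0x40) QPs.
 */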
48
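/* Deliver an asynchronous firmware event to the owning QP.  The reference
 * taken under qp_table->lock keeps the QP alive while its ->event() handler
 * runs; the matching decrement completes &qp->free, which mlx4_qp_free()
 * waits on before tearing the QP down.
 */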
49 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
50 {
51         struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
52         struct mlx4_qp *qp;
53
54         spin_lock(&qp_table->lock);
55
56         qp = __mlx4_qp_lookup(dev, qpn);
57         if (qp)
58                 atomic_inc(&qp->refcount);
59
60         spin_unlock(&qp_table->lock);
61
62         if (!qp) {
63                 mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
64                 return;
65         }
66
67         qp->event(qp, event_type);
68
69         if (atomic_dec_and_test(&qp->refcount))
70                 complete(&qp->free);
71 }
72
73 /* used for INIT/CLOSE port logic */
74 static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
75 {
76         /* this procedure is called after we already know we are on the master */
77         /* qp0 is either the proxy qp0, or the real qp0 */
78         u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
79         *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;
80
81         *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
82                 qp->qpn <= dev->phys_caps.base_sqpn + 1;
83
84         return *real_qp0 || *proxy_qp0;
85 }
86
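/* Core state-transition helper.  A transition to RESET is issued without a
 * mailbox; every other transition copies the optpar mask and the QP context
 * into a command mailbox (context at offset 8) and invokes the
 * state-specific firmware command taken from the op[][] table below.
 */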
87 static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
88                      enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
89                      struct mlx4_qp_context *context,
90                      enum mlx4_qp_optpar optpar,
91                      int sqd_event, struct mlx4_qp *qp, int native)
92 {
93         static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
94                 [MLX4_QP_STATE_RST] = {
95                         [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
96                         [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
97                         [MLX4_QP_STATE_INIT]    = MLX4_CMD_RST2INIT_QP,
98                 },
99                 [MLX4_QP_STATE_INIT]  = {
100                         [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
101                         [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
102                         [MLX4_QP_STATE_INIT]    = MLX4_CMD_INIT2INIT_QP,
103                         [MLX4_QP_STATE_RTR]     = MLX4_CMD_INIT2RTR_QP,
104                 },
105                 [MLX4_QP_STATE_RTR]   = {
106                         [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
107                         [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
108                         [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTR2RTS_QP,
109                 },
110                 [MLX4_QP_STATE_RTS]   = {
111                         [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
112                         [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
113                         [MLX4_QP_STATE_RTS]     = MLX4_CMD_RTS2RTS_QP,
114                         [MLX4_QP_STATE_SQD]     = MLX4_CMD_RTS2SQD_QP,
115                 },
116                 [MLX4_QP_STATE_SQD] = {
117                         [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
118                         [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
119                         [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQD2RTS_QP,
120                         [MLX4_QP_STATE_SQD]     = MLX4_CMD_SQD2SQD_QP,
121                 },
122                 [MLX4_QP_STATE_SQER] = {
123                         [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
124                         [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
125                         [MLX4_QP_STATE_RTS]     = MLX4_CMD_SQERR2RTS_QP,
126                 },
127                 [MLX4_QP_STATE_ERR] = {
128                         [MLX4_QP_STATE_RST]     = MLX4_CMD_2RST_QP,
129                         [MLX4_QP_STATE_ERR]     = MLX4_CMD_2ERR_QP,
130                 }
131         };
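        /* For example, a RST -> INIT transition maps to MLX4_CMD_RST2INIT_QP,
         * while an unsupported transition such as RST -> RTS maps to 0 and is
         * rejected with -EINVAL below.
         */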
132
133         struct mlx4_priv *priv = mlx4_priv(dev);
134         struct mlx4_cmd_mailbox *mailbox;
135         int ret = 0;
136         int real_qp0 = 0;
137         int proxy_qp0 = 0;
138         u8 port;
139
140         if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
141             !op[cur_state][new_state])
142                 return -EINVAL;
143
144         if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
145                 ret = mlx4_cmd(dev, 0, qp->qpn, 2,
146                         MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
147                 if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
148                     cur_state != MLX4_QP_STATE_RST &&
149                     is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
150                         port = (qp->qpn & 1) + 1;
151                         if (proxy_qp0)
152                                 priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
153                         else
154                                 priv->mfunc.master.qp0_state[port].qp0_active = 0;
155                 }
156                 return ret;
157         }
158
159         mailbox = mlx4_alloc_cmd_mailbox(dev);
160         if (IS_ERR(mailbox))
161                 return PTR_ERR(mailbox);
162
163         if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
164                 u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
165                 context->mtt_base_addr_h = mtt_addr >> 32;
166                 context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
167                 context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
168         }
169
170         if ((cur_state == MLX4_QP_STATE_RTR) &&
171             (new_state == MLX4_QP_STATE_RTS) &&
172             dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
173                 context->roce_entropy =
174                         cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));
175
176         *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
177         memcpy(mailbox->buf + 8, context, sizeof *context);
178
179         ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
180                 cpu_to_be32(qp->qpn);
181
182         ret = mlx4_cmd(dev, mailbox->dma,
183                        qp->qpn | (!!sqd_event << 31),
184                        new_state == MLX4_QP_STATE_RST ? 2 : 0,
185                        op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
186
187         if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
188                 port = (qp->qpn & 1) + 1;
189                 if (cur_state != MLX4_QP_STATE_ERR &&
190                     cur_state != MLX4_QP_STATE_RST &&
191                     new_state == MLX4_QP_STATE_ERR) {
192                         if (proxy_qp0)
193                                 priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
194                         else
195                                 priv->mfunc.master.qp0_state[port].qp0_active = 0;
196                 } else if (new_state == MLX4_QP_STATE_RTR) {
197                         if (proxy_qp0)
198                                 priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
199                         else
200                                 priv->mfunc.master.qp0_state[port].qp0_active = 1;
201                 }
202         }
203
204         mlx4_free_cmd_mailbox(dev, mailbox);
205         return ret;
206 }
207
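/* Exported wrapper around __mlx4_qp_modify().  Passing native == 0 means the
 * firmware command takes the wrapped (PF-mediated) path rather than the
 * native one when the caller runs as a virtual function.
 */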
208 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
209                    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
210                    struct mlx4_qp_context *context,
211                    enum mlx4_qp_optpar optpar,
212                    int sqd_event, struct mlx4_qp *qp)
213 {
214         return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
215                                 optpar, sqd_event, qp, 0);
216 }
217 EXPORT_SYMBOL_GPL(mlx4_qp_modify);
218
219 int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
220                             int *base, u8 flags)
221 {
222         u32 uid;
223         int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);
224
225         struct mlx4_priv *priv = mlx4_priv(dev);
226         struct mlx4_qp_table *qp_table = &priv->qp_table;
227
228         if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
229                 return -ENOMEM;
230
231         uid = MLX4_QP_TABLE_ZONE_GENERAL;
232         if (flags & (u8)MLX4_RESERVE_A0_QP) {
233                 if (bf_qp)
234                         uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
235                 else
236                         uid = MLX4_QP_TABLE_ZONE_RSS;
237         }
238
239         *base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
240                                         bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
241         if (*base == -1)
242                 return -ENOMEM;
243
244         return 0;
245 }
246
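/* A minimal usage sketch (illustrative only; values are not taken from this
 * file): reserve an aligned block of QP numbers and release it again.
 *
 *        int base_qpn, err;
 *
 *        err = mlx4_qp_reserve_range(dev, 8, 8, &base_qpn, 0);
 *        if (err)
 *                return err;
 *        ...
 *        mlx4_qp_release_range(dev, base_qpn, 8);
 */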
247 int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
248                           int *base, u8 flags)
249 {
250         u64 in_param = 0;
251         u64 out_param;
252         int err;
253
254         /* Turn off all unsupported QP allocation flags */
255         flags &= dev->caps.alloc_res_qp_mask;
256
257         if (mlx4_is_mfunc(dev)) {
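                /* Slave path: forward the request to the PF.  The allocation
                 * flags travel in the top byte of the immediate parameter's
                 * low dword, the count in its lower 24 bits, and the
                 * alignment in the high dword.
                 */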
258                 set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
259                 set_param_h(&in_param, align);
260                 err = mlx4_cmd_imm(dev, in_param, &out_param,
261                                    RES_QP, RES_OP_RESERVE,
262                                    MLX4_CMD_ALLOC_RES,
263                                    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
264                 if (err)
265                         return err;
266
267                 *base = get_param_l(&out_param);
268                 return 0;
269         }
270         return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
271 }
272 EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
273
274 void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
275 {
276         struct mlx4_priv *priv = mlx4_priv(dev);
277         struct mlx4_qp_table *qp_table = &priv->qp_table;
278
279         if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
280                 return;
281         mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
282 }
283
284 void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
285 {
286         u64 in_param = 0;
287         int err;
288
289         if (!cnt)
290                 return;
291
292         if (mlx4_is_mfunc(dev)) {
293                 set_param_l(&in_param, base_qpn);
294                 set_param_h(&in_param, cnt);
295                 err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
296                                MLX4_CMD_FREE_RES,
297                                MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
298                 if (err) {
299                         mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
300                                   base_qpn, cnt);
301                 }
302         } else
303                  __mlx4_qp_release_range(dev, base_qpn, cnt);
304 }
305 EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
306
307 int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
308 {
309         struct mlx4_priv *priv = mlx4_priv(dev);
310         struct mlx4_qp_table *qp_table = &priv->qp_table;
311         int err;
312
313         err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
314         if (err)
315                 goto err_out;
316
317         err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
318         if (err)
319                 goto err_put_qp;
320
321         err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
322         if (err)
323                 goto err_put_auxc;
324
325         err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
326         if (err)
327                 goto err_put_altc;
328
329         err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
330         if (err)
331                 goto err_put_rdmarc;
332
333         return 0;
334
335 err_put_rdmarc:
336         mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
337
338 err_put_altc:
339         mlx4_table_put(dev, &qp_table->altc_table, qpn);
340
341 err_put_auxc:
342         mlx4_table_put(dev, &qp_table->auxc_table, qpn);
343
344 err_put_qp:
345         mlx4_table_put(dev, &qp_table->qp_table, qpn);
346
347 err_out:
348         return err;
349 }
350
351 static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
352 {
353         u64 param = 0;
354
355         if (mlx4_is_mfunc(dev)) {
356                 set_param_l(&param, qpn);
357                 return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
358                                     MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
359                                     MLX4_CMD_WRAPPED);
360         }
361         return __mlx4_qp_alloc_icm(dev, qpn, gfp);
362 }
363
364 void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
365 {
366         struct mlx4_priv *priv = mlx4_priv(dev);
367         struct mlx4_qp_table *qp_table = &priv->qp_table;
368
369         mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
370         mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
371         mlx4_table_put(dev, &qp_table->altc_table, qpn);
372         mlx4_table_put(dev, &qp_table->auxc_table, qpn);
373         mlx4_table_put(dev, &qp_table->qp_table, qpn);
374 }
375
376 static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
377 {
378         u64 in_param = 0;
379
380         if (mlx4_is_mfunc(dev)) {
381                 set_param_l(&in_param, qpn);
382                 if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
383                              MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
384                              MLX4_CMD_WRAPPED))
385                         mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
386         } else
387                 __mlx4_qp_free_icm(dev, qpn);
388 }
389
390 struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
391 {
392         struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
393         struct mlx4_qp *qp;
394
395         spin_lock_irq(&qp_table->lock);
396
397         qp = __mlx4_qp_lookup(dev, qpn);
398
399         spin_unlock_irq(&qp_table->lock);
400         return qp;
401 }
402
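/* Attach driver state to an already-reserved QPN: map the ICM backing for
 * the QP (context, auxiliary and alternate context, RDMARC and cMPT
 * entries) and insert the QP into the radix tree used by the event and
 * lookup paths.  The refcount/&qp->free pair initialised here is what
 * mlx4_qp_free() later waits on.
 */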
403 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
404 {
405         struct mlx4_priv *priv = mlx4_priv(dev);
406         struct mlx4_qp_table *qp_table = &priv->qp_table;
407         int err;
408
409         if (!qpn)
410                 return -EINVAL;
411
412         qp->qpn = qpn;
413
414         err = mlx4_qp_alloc_icm(dev, qpn, gfp);
415         if (err)
416                 return err;
417
418         spin_lock_irq(&qp_table->lock);
419         err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
420                                 (dev->caps.num_qps - 1), qp);
421         spin_unlock_irq(&qp_table->lock);
422         if (err)
423                 goto err_icm;
424
425         atomic_set(&qp->refcount, 1);
426         init_completion(&qp->free);
427
428         return 0;
429
430 err_icm:
431         mlx4_qp_free_icm(dev, qpn);
432         return err;
433 }
434
435 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
436
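/* A minimal caller sketch for the rate-limit attribute (illustrative values
 * only, not taken from this file):
 *
 *        struct mlx4_update_qp_params params = { 0 };
 *        int err;
 *
 *        params.rate_unit = rate_unit;
 *        params.rate_val  = rate_val;
 *        err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_RATE_LIMIT, &params);
 */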
437 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
438                    enum mlx4_update_qp_attr attr,
439                    struct mlx4_update_qp_params *params)
440 {
441         struct mlx4_cmd_mailbox *mailbox;
442         struct mlx4_update_qp_context *cmd;
443         u64 pri_addr_path_mask = 0;
444         u64 qp_mask = 0;
445         int err = 0;
446
447         if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
448                 return -EINVAL;
449
450         mailbox = mlx4_alloc_cmd_mailbox(dev);
451         if (IS_ERR(mailbox))
452                 return PTR_ERR(mailbox);
453
454         cmd = (struct mlx4_update_qp_context *)mailbox->buf;
455
456         if (attr & MLX4_UPDATE_QP_SMAC) {
457                 pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
458                 cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
459         }
460
461         if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
462                 if (!(dev->caps.flags2
463                       & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
464                         mlx4_warn(dev,
465                                   "Trying to set src check LB, but it isn't supported\n");
466                         err = -ENOTSUPP;
467                         goto out;
468                 }
469                 pri_addr_path_mask |=
470                         1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
471                 if (params->flags &
472                     MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
473                         cmd->qp_context.pri_path.fl |=
474                                 MLX4_FL_ETH_SRC_CHECK_MC_LB;
475                 }
476         }
477
478         if (attr & MLX4_UPDATE_QP_VSD) {
479                 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
480                 if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
481                         cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
482         }
483
484         if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
485                 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
486                 cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
487         }
488
489         if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
490                 if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
491                         mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
492                         err = -EOPNOTSUPP;
493                         goto out;
494                 }
495
496                 qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
497                 cmd->qp_context.qos_vport = params->qos_vport;
498         }
499
500         cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
501         cmd->qp_mask = cpu_to_be64(qp_mask);
502
503         err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
504                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
505                        MLX4_CMD_NATIVE);
506 out:
507         mlx4_free_cmd_mailbox(dev, mailbox);
508         return err;
509 }
510 EXPORT_SYMBOL_GPL(mlx4_update_qp);
511
512 void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
513 {
514         struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
515         unsigned long flags;
516
517         spin_lock_irqsave(&qp_table->lock, flags);
518         radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
519         spin_unlock_irqrestore(&qp_table->lock, flags);
520 }
521 EXPORT_SYMBOL_GPL(mlx4_qp_remove);
522
523 void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
524 {
525         if (atomic_dec_and_test(&qp->refcount))
526                 complete(&qp->free);
527         wait_for_completion(&qp->free);
528
529         mlx4_qp_free_icm(dev, qp->qpn);
530 }
531 EXPORT_SYMBOL_GPL(mlx4_qp_free);
532
533 static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
534 {
535         return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
536                         MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
537 }
538
539 #define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
540 #define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
541 #define MLX4_QP_TABLE_RAW_ETH_SIZE     256
542
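/* Carve the QP number space into zones for the zone allocator: a general
 * zone covering ordinary QPs, an RSS zone at the bottom of the A0 steering
 * region, and one or more RAW_ETH zones whose QP numbers keep bits 6 and 7
 * clear so they remain usable with BlueFlame.
 */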
543 static int mlx4_create_zones(struct mlx4_dev *dev,
544                              u32 reserved_bottom_general,
545                              u32 reserved_top_general,
546                              u32 reserved_bottom_rss,
547                              u32 start_offset_rss,
548                              u32 max_table_offset)
549 {
550         struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
551         struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
552         int bitmap_initialized = 0;
553         u32 last_offset;
554         int k;
555         int err;
556
557         qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
558
559         if (NULL == qp_table->zones)
560                 return -ENOMEM;
561
562         bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);
563
564         if (NULL == bitmap) {
565                 err = -ENOMEM;
566                 goto free_zone;
567         }
568
569         err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
570                                (1 << 23) - 1, reserved_bottom_general,
571                                reserved_top_general);
572
573         if (err)
574                 goto free_bitmap;
575
576         ++bitmap_initialized;
577
578         err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
579                                 MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
580                                 MLX4_ZONE_USE_RR, 0,
581                                 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);
582
583         if (err)
584                 goto free_bitmap;
585
586         err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
587                                reserved_bottom_rss,
588                                reserved_bottom_rss - 1,
589                                dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
590                                reserved_bottom_rss - start_offset_rss);
591
592         if (err)
593                 goto free_bitmap;
594
595         ++bitmap_initialized;
596
597         err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
598                                 MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
599                                 MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
600                                 MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
601                                 0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);
602
603         if (err)
604                 goto free_bitmap;
605
606         last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
607         /*  We have a single zone for the A0 steering QPs area of the FW. This area
608          *  needs to be split into subareas. One set of subareas is for RSS QPs
609          *  (in which qp number bits 6 and/or 7 are set); the other set of subareas
610          *  is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
611          *  Currently, the values returned by the FW (A0 steering area starting qp number
612          *  and A0 steering area size) are such that there are only two subareas -- one
613          *  for RSS and one for RAW_ETH.
614          */
615         for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
616              k++) {
617                 int size;
618                 u32 offset = start_offset_rss;
619                 u32 bf_mask;
620                 u32 requested_size;
621
622                 /* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
623                  * a mask of all LSB bits set until (and not including) the first
624                  * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
625                  * is 0xc0, bf_mask will be 0x3f.
626                  */
627                 bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
628                 requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);
629
630                 if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
631                      ((int)(max_table_offset - last_offset)) >=
632                      roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
633                     (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
634                      !((last_offset + requested_size - 1) &
635                        MLX4_BF_QP_SKIP_MASK)))
636                         size = requested_size;
637                 else {
638                         u32 candidate_offset =
639                                 (last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;
640
641                         if (last_offset & MLX4_BF_QP_SKIP_MASK)
642                                 last_offset = candidate_offset;
643
644                         /* From this point, the BF bits are 0 */
645
646                         if (last_offset > max_table_offset) {
647                                 /* need to skip */
648                                 size = -1;
649                         } else {
650                                 size = min3(max_table_offset - last_offset,
651                                             bf_mask - (last_offset & bf_mask),
652                                             requested_size);
653                                 if (size < requested_size) {
654                                         int candidate_size;
655
656                                         candidate_size = min3(
657                                                 max_table_offset - candidate_offset,
658                                                 bf_mask - (last_offset & bf_mask),
659                                                 requested_size);
660
661                                         /*  We will not take this path if last_offset was
662                                          *  already set above to candidate_offset
663                                          */
664                                         if (candidate_size > size) {
665                                                 last_offset = candidate_offset;
666                                                 size = candidate_size;
667                                         }
668                                 }
669                         }
670                 }
671
672                 if (size > 0) {
673                         /* mlx4_bitmap_alloc_range will find a contiguous range of "size"
674                          * QPs in which both bits 6 and 7 are zero, because we pass it the
675                          * MLX4_BF_QP_SKIP_MASK.
676                          */
677                         offset = mlx4_bitmap_alloc_range(
678                                         *bitmap + MLX4_QP_TABLE_ZONE_RSS,
679                                         size, 1,
680                                         MLX4_BF_QP_SKIP_MASK);
681
682                         if (offset == (u32)-1) {
683                                 err = -ENOMEM;
684                                 break;
685                         }
686
687                         last_offset = offset + size;
688
689                         err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
690                                                roundup_pow_of_two(size) - 1, 0,
691                                                roundup_pow_of_two(size) - size);
692                 } else {
693                         /* Add an empty bitmap; we'll allocate from different zones (since
694                          * at least one is reserved)
695                          */
696                         err = mlx4_bitmap_init(*bitmap + k, 1,
697                                                MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
698                                                0);
699                         mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
700                 }
701
702                 if (err)
703                         break;
704
705                 ++bitmap_initialized;
706
707                 err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
708                                         MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
709                                         MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
710                                         MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
711                                         offset, qp_table->zones_uids + k);
712
713                 if (err)
714                         break;
715         }
716
717         if (err)
718                 goto free_bitmap;
719
720         qp_table->bitmap_gen = *bitmap;
721
722         return err;
723
724 free_bitmap:
725         for (k = 0; k < bitmap_initialized; k++)
726                 mlx4_bitmap_cleanup(*bitmap + k);
727         kfree(bitmap);
728 free_zone:
729         mlx4_zone_allocator_destroy(qp_table->zones);
730         return err;
731 }
732
733 static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
734 {
735         struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
736
737         if (qp_table->zones) {
738                 int i;
739
740                 for (i = 0;
741                      i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
742                      i++) {
743                         struct mlx4_bitmap *bitmap =
744                                 mlx4_zone_get_bitmap(qp_table->zones,
745                                                      qp_table->zones_uids[i]);
746
747                         mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
748                         if (NULL == bitmap)
749                                 continue;
750
751                         mlx4_bitmap_cleanup(bitmap);
752                 }
753                 mlx4_zone_allocator_destroy(qp_table->zones);
754                 kfree(qp_table->bitmap_gen);
755                 qp_table->bitmap_gen = NULL;
756                 qp_table->zones = NULL;
757         }
758 }
759
760 int mlx4_init_qp_table(struct mlx4_dev *dev)
761 {
762         struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
763         int err;
764         int reserved_from_top = 0;
765         int reserved_from_bot;
766         int k;
767         int fixed_reserved_from_bot_rv = 0;
768         int bottom_reserved_for_rss_bitmap;
769         u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
770                         dev->caps.dmfs_high_rate_qpn_range;
771
772         spin_lock_init(&qp_table->lock);
773         INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
774         if (mlx4_is_slave(dev))
775                 return 0;
776
777         /* We reserve 2 extra QPs per port for the special QPs.  The
778          * block of special QPs must be aligned to a multiple of 8, so
779          * round up.
780          *
781          * We also reserve the MSB of the 24-bit QP number to indicate
782          * that a QP is an XRC QP.
783          */
784         for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
785                 fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];
786
787         if (fixed_reserved_from_bot_rv < max_table_offset)
788                 fixed_reserved_from_bot_rv = max_table_offset;
789
790         /* We reserve at least 1 extra QP for bitmaps that we don't have enough space for */
791         bottom_reserved_for_rss_bitmap =
792                 roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
793         dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);
794
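        /* Sort the top (non-bottom) QP regions by descending reserved count
         * and stack them downwards from the end of the QP number space,
         * recording each region's base QPN and the total reserved from the
         * top.
         */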
795         {
796                 int sort[MLX4_NUM_QP_REGION];
797                 int i, j;
798                 int last_base = dev->caps.num_qps;
799
800                 for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
801                         sort[i] = i;
802
803                 for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
804                         for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
805                                 if (dev->caps.reserved_qps_cnt[sort[j]] >
806                                     dev->caps.reserved_qps_cnt[sort[j - 1]])
807                                         swap(sort[j], sort[j - 1]);
808                         }
809                 }
810
811                 for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
812                         last_base -= dev->caps.reserved_qps_cnt[sort[i]];
813                         dev->caps.reserved_qps_base[sort[i]] = last_base;
814                         reserved_from_top +=
815                                 dev->caps.reserved_qps_cnt[sort[i]];
816                 }
817         }
818
819        /* Reserve 8 real SQPs in both native and SRIOV modes.
820         * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
821         * (for all PFs and VFs), and 8 corresponding tunnel QPs.
822         * Each proxy SQP works opposite its own tunnel QP.
823         *
824         * The QPs are arranged as follows:
825         * a. 8 real SQPs
826         * b. All the proxy SQPs (8 per function)
827         * c. All the tunnel QPs (8 per function)
828         */
829         reserved_from_bot = mlx4_num_reserved_sqps(dev);
830         if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
831                 mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
832                 return -EINVAL;
833         }
834
835         err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
836                                 bottom_reserved_for_rss_bitmap,
837                                 fixed_reserved_from_bot_rv,
838                                 max_table_offset);
839
840         if (err)
841                 return err;
842
843         if (mlx4_is_mfunc(dev)) {
844                 /* for PPF use */
845                 dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
846                 dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;
847
848                 /* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
849                  * since the PF does not call mlx4_slave_caps */
850                 dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
851                 dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
852                 dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
853                 dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
854
855                 if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
856                     !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
857                         err = -ENOMEM;
858                         goto err_mem;
859                 }
860
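                /* Per port: this PF's proxy QP0/QP1 live in the proxy block
                 * at a stride of 8 QPs per function, and each tunnel QP sits
                 * exactly 8 * MLX4_MFUNC_MAX above its proxy counterpart.
                 */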
861                 for (k = 0; k < dev->caps.num_ports; k++) {
862                         dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
863                                 8 * mlx4_master_func_num(dev) + k;
864                         dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
865                         dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
866                                 8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
867                         dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
868                 }
869         }
870
871
872         err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
873         if (err)
874                 goto err_mem;
875
876         return err;
877
878 err_mem:
879         kfree(dev->caps.qp0_tunnel);
880         kfree(dev->caps.qp0_proxy);
881         kfree(dev->caps.qp1_tunnel);
882         kfree(dev->caps.qp1_proxy);
883         dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
884                 dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
885         mlx4_cleanup_qp_zones(dev);
886         return err;
887 }
888
889 void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
890 {
891         if (mlx4_is_slave(dev))
892                 return;
893
894         mlx4_CONF_SPECIAL_QP(dev, 0);
895
896         mlx4_cleanup_qp_zones(dev);
897 }
898
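/* Read the current QP context back from firmware.  The context is returned
 * at offset 8 in the mailbox, mirroring the layout __mlx4_qp_modify() uses
 * when writing it.
 */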
899 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
900                   struct mlx4_qp_context *context)
901 {
902         struct mlx4_cmd_mailbox *mailbox;
903         int err;
904
905         mailbox = mlx4_alloc_cmd_mailbox(dev);
906         if (IS_ERR(mailbox))
907                 return PTR_ERR(mailbox);
908
909         err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
910                            MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
911                            MLX4_CMD_WRAPPED);
912         if (!err)
913                 memcpy(context, mailbox->buf + 8, sizeof *context);
914
915         mlx4_free_cmd_mailbox(dev, mailbox);
916         return err;
917 }
918 EXPORT_SYMBOL_GPL(mlx4_qp_query);
919
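/* Walk a freshly reset QP through RST -> INIT -> RTR -> RTS, reusing the
 * same context for every step.  Each successful transition is recorded in
 * *qp_state, so on error the caller knows the last state actually reached.
 */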
920 int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
921                      struct mlx4_qp_context *context,
922                      struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
923 {
924         int err;
925         int i;
926         enum mlx4_qp_state states[] = {
927                 MLX4_QP_STATE_RST,
928                 MLX4_QP_STATE_INIT,
929                 MLX4_QP_STATE_RTR,
930                 MLX4_QP_STATE_RTS
931         };
932
933         for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
934                 context->flags &= cpu_to_be32(~(0xf << 28));
935                 context->flags |= cpu_to_be32(states[i + 1] << 28);
936                 if (states[i + 1] != MLX4_QP_STATE_RTR)
937                         context->params2 &= ~MLX4_QP_BIT_FPP;
938                 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
939                                      context, 0, 0, qp);
940                 if (err) {
941                         mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
942                                  states[i + 1], err);
943                         return err;
944                 }
945
946                 *qp_state = states[i + 1];
947         }
948
949         return 0;
950 }
951 EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
952
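/* Derive the RoCE v2 UDP source-port entropy for a connected QP: fold the
 * local and remote 24-bit QP numbers to 16 bits each (via folded_qp()), XOR
 * them so both ends of the connection derive the same value, and set the
 * two top bits so the result falls in the dynamic port range.  0xdead is
 * returned if the QP context cannot be queried.
 */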
953 u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
954 {
955         struct mlx4_qp_context context;
956         struct mlx4_qp qp;
957         int err;
958
959         qp.qpn = qpn;
960         err = mlx4_qp_query(dev, &qp, &context);
961         if (!err) {
962                 u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
963                 u16 folded_dst = folded_qp(dest_qpn);
964                 u16 folded_src = folded_qp(qpn);
965
966                 return (dest_qpn != qpn) ?
967                         ((folded_dst ^ folded_src) | 0xC000) :
968                         folded_src | 0xC000;
969         }
970         return 0xdead;
971 }