/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
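
/*
 * MTT segments are handed out by a binary buddy allocator: bits[o] marks
 * which blocks of order o are free and num_free[o] counts them.  An
 * allocation takes the smallest free block of at least the requested
 * order, splitting larger blocks on the way down, and returns the first
 * segment index of the block (or -1 if nothing is free).
 */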
static u32 mlx4_buddy_alloc(struct mlx4_buddy *buddy, int order)
	spin_lock(&buddy->lock);
	for (o = order; o <= buddy->max_order; ++o)
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			seg = find_first_bit(buddy->bits[o], m);
	spin_unlock(&buddy->lock);
	clear_bit(seg, buddy->bits[o]);
	set_bit(seg ^ 1, buddy->bits[o]);
	spin_unlock(&buddy->lock);
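
/*
 * Freeing walks back up the orders: as long as the buddy of the freed
 * block is also free, the two halves are merged into one block of the
 * next higher order before the final block is marked free.
 */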
static void mlx4_buddy_free(struct mlx4_buddy *buddy, u32 seg, int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
	buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free),
	if (!buddy->bits || !buddy->num_free)
	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
		buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);
	kfree(buddy->num_free);
static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

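/*
 * MTT ranges are requested in units of single MTT entries, but the buddy
 * allocator works in segments of (1 << log_mtts_per_seg) entries; hence
 * the conversion from "order" to "seg_order" below before the ICM pages
 * backing the range are referenced with mlx4_table_get_range().
 */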
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
	offset = seg * (1 << log_mtts_per_seg);
	if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
				 offset + (1 << order) - 1)) {
		mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
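
/*
 * On a multi-function (SR-IOV) device the resource bitmaps and buddy live
 * on the master, so the wrapper below forwards the request as a RES_MTT
 * ALLOC_RES command; otherwise it calls the __mlx4_* helper directly.
 * The same split between "wrapper" and "__" variants repeats for the MPT
 * helpers further down.
 */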
static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, order);
		err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
				   RES_OP_RESERVE_AND_MAP,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_WRAPPED);
		if (err)
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_alloc_mtt_range(dev, order);
}

int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
		  struct mlx4_mtt *mtt)
		mtt->page_shift = MLX4_ICM_PAGE_SHIFT;
		mtt->page_shift = page_shift;
	for (mtt->order = 0, i = 1; i < npages; i <<= 1)
	mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
	if (mtt->offset == -1)
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u32 first_seg;
	int seg_order;
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	seg_order = max_t(int, order - log_mtts_per_seg, 0);
	first_seg = offset / (1 << log_mtts_per_seg);

	mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
	mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
			     offset + (1 << order) - 1);
}

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, offset);
		set_param_h(&in_param, order);
		err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
				  offset, order);
		return;
	}
	__mlx4_free_mtt_range(dev, offset, order);
}

void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	if (mtt->order < 0)
		return;

	mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);

u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
	return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);

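/*
 * The memory key is the MPT index rotated left by 8 bits, so the index
 * sits in the key's upper 24 bits and the low byte is left free to act
 * as a key tag; key_to_hw_index() is the inverse rotation back to the
 * plain index used for the bitmaps and ICM tables.
 */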
static u32 hw_index_to_key(u32 ind)
{
	return (ind >> 24) | (ind << 8);
}

static u32 key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd(dev, mailbox->dma, mpt_index,
			0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int mpt_index)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
			    !mailbox, MLX4_CMD_HW2SW_MPT,
			    MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}

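/*
 * SW2HW_MPT hands an MPT entry written by software over to the HCA;
 * HW2SW_MPT pulls it back into software ownership (a NULL mailbox means
 * the current contents are not needed).  The helpers below use this pair
 * to let callers read and modify a live MPT entry.
 */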
/* Must protect against concurrent access */
int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
		       struct mlx4_mpt_entry ***mpt_entry)
	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
	struct mlx4_cmd_mailbox *mailbox = NULL;
	if (mmr->enabled != MLX4_MPT_EN_HW)
	err = mlx4_HW2SW_MPT(dev, NULL, key);
		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
	mmr->enabled = MLX4_MPT_EN_SW;
	if (!mlx4_is_mfunc(dev)) {
		**mpt_entry = mlx4_table_find(
			&mlx4_priv(dev)->mr_table.dmpt_table,
		mailbox = mlx4_alloc_cmd_mailbox(dev);
			return PTR_ERR(mailbox);
		err = mlx4_cmd_box(dev, 0, mailbox->dma, key,
				   0, MLX4_CMD_QUERY_MPT,
				   MLX4_CMD_TIME_CLASS_B,
		*mpt_entry = (struct mlx4_mpt_entry **)&mailbox->buf;
	if (!(*mpt_entry) || !(**mpt_entry)) {
	mlx4_free_cmd_mailbox(dev, mailbox);
EXPORT_SYMBOL_GPL(mlx4_mr_hw_get_mpt);
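
/*
 * mlx4_mr_hw_get_mpt() leaves the MR in software ownership and returns a
 * pointer to its MPT entry: directly into the dMPT ICM table when the
 * driver owns the resources, or into a QUERY_MPT mailbox copy on a
 * multi-function device.  mlx4_mr_hw_write_mpt() pushes the (possibly
 * modified) entry back to hardware, and mlx4_mr_hw_put_mpt() releases the
 * mailbox if one was used.
 */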
int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
			 struct mlx4_mpt_entry **mpt_entry)
	if (!mlx4_is_mfunc(dev)) {
		/* Make sure any changes to this entry are flushed */
		*(u8 *)(*mpt_entry) = MLX4_MPT_STATUS_HW;
		/* Make sure the new status is written */
		err = mlx4_SYNC_TPT(dev);
		int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
		(*mpt_entry)->lkey = 0;
		err = mlx4_SW2HW_MPT(dev, mailbox, key);
	mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
	mmr->enabled = MLX4_MPT_EN_HW;
EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
void mlx4_mr_hw_put_mpt(struct mlx4_dev *dev,
			struct mlx4_mpt_entry **mpt_entry)
{
	if (mlx4_is_mfunc(dev)) {
		struct mlx4_cmd_mailbox *mailbox =
			container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
				     buf);
		mlx4_free_cmd_mailbox(dev, mailbox);
	}
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);

int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
			 u32 pdn)
{
	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
	/* The wrapper function will put the slave's id here */
	if (mlx4_is_mfunc(dev))
		pd_flags &= ~MLX4_MPT_PD_VF_MASK;

	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
					  (pdn & MLX4_MPT_PD_MASK)
					  | MLX4_MPT_PD_FLAG_EN_INV);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_pd);

int mlx4_mr_hw_change_access(struct mlx4_dev *dev,
			     struct mlx4_mpt_entry *mpt_entry,
			     u32 access)
{
	u32 flags = (be32_to_cpu(mpt_entry->flags) & ~MLX4_PERM_MASK) |
		    (access & MLX4_PERM_MASK);

	mpt_entry->flags = cpu_to_be32(flags);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_hw_change_access);

static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
				  u64 iova, u64 size, u32 access, int npages,
				  int page_shift, struct mlx4_mr *mr)
{
	mr->iova = iova;
	mr->size = size;
	mr->pd = pd;
	mr->access = access;
	mr->enabled = MLX4_MPT_DISABLED;
	mr->key = hw_index_to_key(mridx);

	return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
}

static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox,
			  int num_entries)
{
	return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}

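/*
 * MPT allocation is done in two steps: reserve an index from the MPT
 * bitmap, then map the ICM memory that backs the dMPT entry for that
 * index.  Each step has a direct (__mlx4_*) variant and a wrapper that
 * forwards the request to the master on multi-function devices.
 */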
int __mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
}

static int mlx4_mpt_reserve(struct mlx4_dev *dev)
{
	u64 out_param;

	if (mlx4_is_mfunc(dev)) {
		if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
				 MLX4_CMD_ALLOC_RES,
				 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			return -1;
		return get_param_l(&out_param);
	}
	return __mlx4_mpt_reserve(dev);
}

void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index, MLX4_NO_RR);
}

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
			     MLX4_CMD_FREE_RES,
			     MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to release mr index:%d\n",
				  index);
		return;
	}
	__mlx4_mpt_release(dev, index);
}

int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
		return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES,
				    MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_mpt_alloc_icm(dev, index);
}

void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;

	mlx4_table_put(dev, &mr_table->dmpt_table, index);
}

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
		if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
				  index);
		return;
	}
	return __mlx4_mpt_free_icm(dev, index);
}

int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
		  int npages, int page_shift, struct mlx4_mr *mr)
{
	u32 index;
	int err;

	index = mlx4_mpt_reserve(dev);
	if (index == -1)
		return -ENOMEM;

	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
				     access, npages, page_shift, mr);
	if (err)
		mlx4_mpt_release(dev, index);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);

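/*
 * Typical call sequence (a sketch, not code from this file): a consumer
 * such as mlx4_ib allocates an MR with mlx4_mr_alloc(), fills its
 * translation table with mlx4_write_mtt() or mlx4_buf_write_mtt(), and
 * then moves the MPT entry to hardware ownership with mlx4_mr_enable();
 * teardown is mlx4_mr_free().
 */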
static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int err;

	if (mr->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mr->key) &
				     (dev->caps.num_mpts - 1));
		if (err) {
			mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
				  err);
			return err;
		}

		mr->enabled = MLX4_MPT_EN_SW;
	}
	mlx4_mtt_cleanup(dev, &mr->mtt);

	return 0;
}

int mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	int ret;

	ret = mlx4_mr_free_reserved(dev, mr);
	if (ret)
		return ret;
	if (mr->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
	mlx4_mpt_release(dev, key_to_hw_index(mr->key));

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);

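/*
 * The two rereg helpers below support memory re-registration: cleanup
 * drops the MR's old MTT range while the MPT entry is held in software
 * ownership, and rereg_mem_write allocates a new MTT and rewrites the
 * start/length/entity_size fields before the entry is handed back to
 * hardware with mlx4_mr_hw_write_mpt().
 */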
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
	mlx4_mtt_cleanup(dev, &mr->mtt);
	mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);

int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
			    u64 iova, u64 size, int npages,
			    int page_shift, struct mlx4_mpt_entry *mpt_entry)
	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
	mpt_entry->start = cpu_to_be64(iova);
	mpt_entry->length = cpu_to_be64(size);
	mpt_entry->entity_size = cpu_to_be32(page_shift);
	mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE |
					  MLX4_MPT_FLAG_SW_OWNS));
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
		if (mr->mtt.page_shift == 0)
			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	mr->enabled = MLX4_MPT_EN_SW;
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_write);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
	mpt_entry = mailbox->buf;
	mpt_entry->flags = cpu_to_be32(MLX4_MPT_FLAG_MIO |
				       MLX4_MPT_FLAG_REGION |
	mpt_entry->key = cpu_to_be32(key_to_hw_index(mr->key));
	mpt_entry->pd_flags = cpu_to_be32(mr->pd | MLX4_MPT_PD_FLAG_EN_INV);
	mpt_entry->start = cpu_to_be64(mr->iova);
	mpt_entry->length = cpu_to_be64(mr->size);
	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
	if (mr->mtt.order < 0) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
		mpt_entry->mtt_addr = 0;
		mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
		/* fast register MR in free state */
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
						   MLX4_MPT_PD_FLAG_RAE);
		mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mr->key) & (dev->caps.num_mpts - 1));
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
	mr->enabled = MLX4_MPT_EN_HW;
	mlx4_free_cmd_mailbox(dev, mailbox);
	mlx4_free_cmd_mailbox(dev, mailbox);
	mlx4_mpt_free_icm(dev, key_to_hw_index(mr->key));
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
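
/*
 * MTT entries live in ICM that is mapped one page at a time, so a single
 * chunk write must not cross an ICM page boundary; callers split their
 * requests accordingly before calling mlx4_write_mtt_chunk().
 */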
static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				int start_index, int npages, u64 *page_list)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 *mtts;
	dma_addr_t dma_handle;
	int i;

	mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
			       start_index, &dma_handle);

	if (!mtts)
		return -ENOMEM;

	dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);

	return 0;
}

int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     int start_index, int npages, u64 *page_list)
	int max_mtts_first_page;

	/* compute how many mtts fit in the first page */
	mtts_per_page = PAGE_SIZE / sizeof(u64);
	max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
	chunk = min_t(int, max_mtts_first_page, npages);
	err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
	start_index += chunk;
	chunk = min_t(int, mtts_per_page, npages);
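
/*
 * On a multi-function device the MTT write goes through the WRITE_MTT
 * command: inbox[0] holds the starting MTT offset, inbox[1] is reserved,
 * and the remaining mailbox space carries the entries, which is why each
 * chunk is limited to MLX4_MAILBOX_SIZE / sizeof(u64) - 2 entries and the
 * page list is copied starting at inbox[i + 2].
 */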
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   int start_index, int npages, u64 *page_list)
	struct mlx4_cmd_mailbox *mailbox = NULL;
	__be64 *inbox = NULL;
	if (mlx4_is_mfunc(dev)) {
		mailbox = mlx4_alloc_cmd_mailbox(dev);
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;
			chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
			inbox[0] = cpu_to_be64(mtt->offset + start_index);
			for (i = 0; i < chunk; ++i)
				inbox[i + 2] = cpu_to_be64(page_list[i] |
							   MLX4_MTT_FLAG_PRESENT);
			err = mlx4_WRITE_MTT(dev, mailbox, chunk);
				mlx4_free_cmd_mailbox(dev, mailbox);
			start_index += chunk;
		mlx4_free_cmd_mailbox(dev, mailbox);
	return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_buf *buf)
	page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
	for (i = 0; i < buf->npages; ++i)
		page_list[i] = buf->direct.map + (i << buf->page_shift);
		page_list[i] = buf->page_list[i].map;
	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
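
/*
 * Memory windows reuse the MPT machinery: allocation only reserves an MPT
 * index (type 1 windows need the MEM_WINDOW device capability, type 2 the
 * BMME TYPE_2_WIN flag), and the ICM backing plus SW2HW_MPT transition
 * happen later in mlx4_mw_enable().
 */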
int mlx4_mw_alloc(struct mlx4_dev *dev, u32 pd, enum mlx4_mw_type type,
	if ((type == MLX4_MW_TYPE_1 &&
	     !(dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)) ||
	    (type == MLX4_MW_TYPE_2 &&
	     !(dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)))
	index = mlx4_mpt_reserve(dev);
	mw->key = hw_index_to_key(index);
	mw->enabled = MLX4_MPT_DISABLED;
EXPORT_SYMBOL_GPL(mlx4_mw_alloc);
int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw)
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mpt_entry *mpt_entry;
	err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
	mpt_entry = mailbox->buf;
	/* Note that the MLX4_MPT_FLAG_REGION bit in mpt_entry->flags is turned
	 * off, thus creating a memory window and not a memory region.
	 */
	mpt_entry->key = cpu_to_be32(key_to_hw_index(mw->key));
	mpt_entry->pd_flags = cpu_to_be32(mw->pd);
	if (mw->type == MLX4_MW_TYPE_2) {
		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
		mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP);
		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_EN_INV);
	err = mlx4_SW2HW_MPT(dev, mailbox,
			     key_to_hw_index(mw->key) &
			     (dev->caps.num_mpts - 1));
		mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
	mw->enabled = MLX4_MPT_EN_HW;
	mlx4_free_cmd_mailbox(dev, mailbox);
	mlx4_free_cmd_mailbox(dev, mailbox);
	mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
EXPORT_SYMBOL_GPL(mlx4_mw_enable);
void mlx4_mw_free(struct mlx4_dev *dev, struct mlx4_mw *mw)
{
	int err;

	if (mw->enabled == MLX4_MPT_EN_HW) {
		err = mlx4_HW2SW_MPT(dev, NULL,
				     key_to_hw_index(mw->key) &
				     (dev->caps.num_mpts - 1));
		if (err)
			mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);

		mw->enabled = MLX4_MPT_EN_SW;
	}
	if (mw->enabled)
		mlx4_mpt_free_icm(dev, key_to_hw_index(mw->key));
	mlx4_mpt_release(dev, key_to_hw_index(mw->key));
}
EXPORT_SYMBOL_GPL(mlx4_mw_free);

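/*
 * Table setup on the PF: the MPT bitmap is sized to num_mpts, the MTT
 * buddy covers num_mtts / (1 << log_mtts_per_seg) segments, and any
 * firmware-reserved MTTs are allocated up front so later allocations
 * cannot hand them out.  Slaves skip all of this since their resources
 * are managed by the master.
 */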
int mlx4_init_mr_table(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;
	/* Nothing to do for slaves - all MR handling is forwarded
	 * to the master */
	if (mlx4_is_slave(dev))
	if (!is_power_of_2(dev->caps.num_mpts))
	err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
			       ~0, dev->caps.reserved_mrws, 0);
	err = mlx4_buddy_init(&mr_table->mtt_buddy,
			      ilog2((u32)dev->caps.num_mtts /
			      (1 << log_mtts_per_seg)));
	if (dev->caps.reserved_mtts) {
		priv->reserved_mtts =
			mlx4_alloc_mtt_range(dev,
					     fls(dev->caps.reserved_mtts - 1));
		if (priv->reserved_mtts < 0) {
			mlx4_warn(dev, "MTT table of order %u is too small\n",
				  mr_table->mtt_buddy.max_order);
			goto err_reserve_mtts;
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_mr_table *mr_table = &priv->mr_table;

	if (mlx4_is_slave(dev))
		return;
	if (priv->reserved_mtts >= 0)
		mlx4_free_mtt_range(dev, priv->reserved_mtts,
				    fls(dev->caps.reserved_mtts - 1));
	mlx4_buddy_cleanup(&mr_table->mtt_buddy);
	mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}

int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);