/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
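
/*
 * Note (illustrative, not from the original source): the two helpers above
 * are inverse 32-bit rotations by 8 bits, so a round trip restores the
 * bitmap index, e.g.:
 *
 *	obj = 0x12;			// index from the mtpt bitmap
 *	key = hw_index_to_key(obj);	// 0x1200
 *	idx = key_to_hw_index(key);	// 0x12 == obj
 */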

static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_CREATE_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
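
/*
 * Callers may pass a NULL mailbox: the output DMA address then becomes 0
 * and op_modifier is set to !mailbox, telling the hardware the DESTROY_MPT
 * command runs without an output mailbox. Callers also mask the index with
 * (num_mtpts - 1), since the MPT table size is a power of two.
 */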
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			u32 pd, u64 iova, u64 size, u32 access)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long obj = 0;
	int err;

	/* Allocate a key for mr from mr_table */
	err = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &obj);
	if (err) {
		ibdev_err(ibdev,
			  "failed to alloc bitmap for MR key, ret = %d.\n",
			  err);
		return -ENOMEM;
	}

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* MR's PD num */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(obj);		/* MR key */

	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	if (err) {
		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;

err_free_bitmap:
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
	return err;
}

static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	unsigned long obj = key_to_hw_index(mr->key);

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap, obj, BITMAP_NO_RR);
}

static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			size_t length, struct ib_udata *udata, u64 start,
			unsigned int access)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = length;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.fixed_page = true;
	buf_attr.user_access = access;
	/* fast MR's buffer is allocated before mapping, not at creation */
	buf_attr.mtt_only = is_fast;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, start);
	if (err) {
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
		return err;
	}

	mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;

	return 0;
}

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
				   ret);
	}

	free_mr_pbl(hr_dev, mr);
	free_mr_key(hr_dev, mr);
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		return ret;
	}

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
					     mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
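
/*
 * Note (added for clarity): the MPT bitmap below hands out the hardware
 * indexes that hw_index_to_key() turns into MR/MW keys. Judging from the
 * hns_roce_bitmap_init() arguments, the mask is num_mtpts - 1 (a power of
 * two) and the first reserved_mrws indexes are held back from allocation.
 */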
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	return hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				    hr_dev->caps.num_mtpts,
				    hr_dev->caps.num_mtpts - 1,
				    hr_dev->caps.reserved_mrws, 0);
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;

	/* Allocate memory region key */
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, 0, acc);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	free_mr_key(hr_dev, mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_MR;
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, virt_addr, length,
			   access_flags);
	if (ret)
		goto err_alloc_mr;

	ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, access_flags);
	if (ret)
		goto err_alloc_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_alloc_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;

	return &mr->ibmr;

err_alloc_pbl:
	free_mr_pbl(hr_dev, mr);
err_alloc_key:
	free_mr_key(hr_dev, mr);
err_alloc_mr:
	kfree(mr);
	return ERR_PTR(ret);
}

static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
			  u64 start, u64 length,
			  u64 virt_addr, int mr_access_flags,
			  struct hns_roce_cmd_mailbox *mailbox,
			  u32 pdn, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret;

	free_mr_pbl(hr_dev, mr);
	ret = alloc_mr_pbl(hr_dev, mr, length, udata, start, mr_access_flags);
	if (ret) {
		ibdev_err(ibdev, "failed to create mr PBL, ret = %d.\n", ret);
		return ret;
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		ibdev_err(ibdev, "failed to write mtpt, ret = %d.\n", ret);
		free_mr_pbl(hr_dev, mr);
	}

	return ret;
}

int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		ret = rereg_mr_trans(ibmr, flags,
				     start, length,
				     virt_addr, mr_access_flags,
				     mailbox, pdn, udata);
		if (ret)
			goto free_cmd_mbox;
	} else {
		ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
						   mr_access_flags, virt_addr,
						   length, mailbox->buf);
		if (ret)
			goto free_cmd_mbox;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
	} else {
		hns_roce_mr_free(hr_dev, mr);
		kfree(mr);
	}

	return ret;
}

struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	u64 length;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;

	/* Allocate memory region key */
	length = max_num_sg * (1 << PAGE_SHIFT);
	ret = alloc_mr_key(hr_dev, mr, to_hr_pd(pd)->pdn, 0, length, 0);
	if (ret)
		goto err_free;

	ret = alloc_mr_pbl(hr_dev, mr, length, NULL, 0, 0);
	if (ret)
		goto err_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;

	return &mr->ibmr;

	/* unwind in reverse order of allocation: PBL first, then the key */
err_pbl:
	free_mr_pbl(hr_dev, mr);
err_key:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}
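
/*
 * Per-page callback for ib_sg_to_pages() below: it is called once per
 * page-sized block of the scatterlist and stores the block's DMA address
 * in mr->page_list until the PBL capacity (buf_pg_count) is exhausted.
 */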
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}

	return -ENOBUFS;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	int ret = 0;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return ret;

	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
		goto err_page_list;
	}

	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = mr->npages;
	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
	mtr->hem_cfg.region_count = 1;
	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
	if (ret) {
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		ret = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = ilog2(ibmr->page_size);
		ret = mr->npages;
	}

err_page_list:
	kvfree(mr->page_list);
	mr->page_list = NULL;

	return ret;
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);
	unsigned long index = 0;
	int ret;

	/* Allocate a key for mw from bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		return ret;

	mw->rkey = hw_index_to_key(index);

	ibmw->rkey = mw->rkey;
	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return 0;

err_mw:
	hns_roce_mw_free(hr_dev, mw);
	return ret;
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	return 0;
}

static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  dma_addr_t *pages, struct hns_roce_buf_region *region)
{
	__le64 *mtts;
	int offset, end, count, npage = 0, i;
	u64 addr;

	/* if hopnum is 0, the buffer cannot store BAs, so skip writing mtt */
	if (!region->hopnum)
		return 0;

	offset = region->offset;
	end = offset + region->count;
	while (offset < end) {
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count, NULL);
		if (!mtts)
			return -ENOBUFS;

		for (i = 0; i < count; i++) {
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				addr = to_hr_hw_page_addr(pages[npage]);
			else
				addr = pages[npage];

			mtts[i] = cpu_to_le64(addr);
			npage++;
		}
		offset += count;
	}

	return 0;
}

static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
	int i;

	for (i = 0; i < attr->region_count; i++)
		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
		    attr->region[i].hopnum > 0)
			return true;

	/* Because an mtr has only one root base address, a hopnum of 0 means
	 * the root base address equals the first buffer address, so all
	 * allocated memory must sit in a contiguous space accessed in direct
	 * mode.
	 */
	return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < attr->region_count; i++)
		size += attr->region[i].size;

	return size;
}

static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
					  unsigned int page_shift)
{
	if (is_direct)
		return ALIGN(alloc_size, 1 << page_shift);
	else
		return HNS_HW_DIRECT_PAGE_COUNT << page_shift;
}

/*
 * Check that the given pages occupy a contiguous address space.
 * Returns 0 on success, or the index of the first non-contiguous page.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
					 unsigned int page_shift)
{
	size_t page_size = 1 << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;

	return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release user buffers */
	if (mtr->umem) {
		ib_umem_release(mtr->umem);
		mtr->umem = NULL;
	}

	/* release kernel buffers */
	if (mtr->kmem) {
		hns_roce_buf_free(hr_dev, mtr->kmem);
		kfree(mtr->kmem);
		mtr->kmem = NULL;
	}
}

static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_attr *buf_attr, bool is_direct,
			  struct ib_udata *udata, unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int best_pg_shift;
	int all_pg_count = 0;
	size_t direct_size;
	size_t total_size;
	int ret;

	total_size = mtr_bufs_size(buf_attr);
	if (total_size < 1) {
		ibdev_err(ibdev, "Failed to check mtr size\n");
		return -EINVAL;
	}

	if (udata) {
		unsigned long pgsz_bitmap;
		unsigned long page_size;

		mtr->kmem = NULL;
		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
					buf_attr->user_access);
		if (IS_ERR_OR_NULL(mtr->umem)) {
			ibdev_err(ibdev, "Failed to get umem, ret %ld\n",
				  PTR_ERR(mtr->umem));
			return -ENOMEM;
		}

		if (buf_attr->fixed_page)
			pgsz_bitmap = 1 << buf_attr->page_shift;
		else
			pgsz_bitmap = GENMASK(buf_attr->page_shift, PAGE_SHIFT);

		page_size = ib_umem_find_best_pgsz(mtr->umem, pgsz_bitmap,
						   user_addr);
		if (!page_size)
			return -EINVAL;

		best_pg_shift = order_base_2(page_size);
		all_pg_count = ib_umem_num_dma_blocks(mtr->umem, page_size);
	} else {
		mtr->umem = NULL;
		mtr->kmem = kzalloc(sizeof(*mtr->kmem), GFP_KERNEL);
		if (!mtr->kmem) {
			ibdev_err(ibdev, "Failed to alloc kmem\n");
			return -ENOMEM;
		}

		direct_size = mtr_kmem_direct_size(is_direct, total_size,
						   buf_attr->page_shift);
		ret = hns_roce_buf_alloc(hr_dev, total_size, direct_size,
					 mtr->kmem, buf_attr->page_shift);
		if (ret) {
			ibdev_err(ibdev, "Failed to alloc kmem, ret %d\n", ret);
			goto err_alloc_mem;
		}

		best_pg_shift = buf_attr->page_shift;
		all_pg_count = mtr->kmem->npages;
	}

	/* must be bigger than the minimum hardware page shift */
	if (best_pg_shift < HNS_HW_PAGE_SHIFT || all_pg_count < 1) {
		ret = -EINVAL;
		ibdev_err(ibdev, "Failed to check mtr page shift %d count %d\n",
			  best_pg_shift, all_pg_count);
		goto err_alloc_mem;
	}

	mtr->hem_cfg.buf_pg_shift = best_pg_shift;
	mtr->hem_cfg.buf_pg_count = all_pg_count;

	return 0;

err_alloc_mem:
	mtr_free_bufs(hr_dev, mtr);
	return ret;
}

static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 dma_addr_t *pages, int count, unsigned int page_shift)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int npage;
	int err;

	if (mtr->umem)
		npage = hns_roce_get_umem_bufs(hr_dev, pages, count, 0,
					       mtr->umem, page_shift);
	else
		npage = hns_roce_get_kmem_bufs(hr_dev, pages, count, 0,
					       mtr->kmem);

	if (mtr->hem_cfg.is_direct && npage > 1) {
		err = mtr_check_direct_pages(pages, npage, page_shift);
		if (err) {
			ibdev_err(ibdev, "Failed to check %s direct page-%d\n",
				  mtr->umem ? "user" : "kernel", err);
			npage = err;
		}
	}

	return npage;
}

int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, int page_cnt)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_region *r;
	int err;
	int i;

	/*
	 * Only use the first page address as root ba when hopnum is 0, this
	 * is because the addresses of all pages are consecutive in this case.
	 */
	if (mtr->hem_cfg.is_direct) {
		mtr->hem_cfg.root_ba = pages[0];
		return 0;
	}

	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
		r = &mtr->hem_cfg.region[i];
		if (r->offset + r->count > page_cnt) {
			err = -EINVAL;
			ibdev_err(ibdev,
				  "failed to check mtr%u end %u + %u, max %u.\n",
				  i, r->offset, r->count, page_cnt);
			return err;
		}

		err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
		if (err) {
			ibdev_err(ibdev,
				  "failed to map mtr%u offset %u, ret = %d.\n",
				  i, r->offset, err);
			return err;
		}
	}

	return 0;
}

int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int start_index;
	int mtt_count;
	int total = 0;
	__le64 *mtts;
	int npage;
	u64 addr;
	int left;

	if (!mtt_buf || mtt_max < 1)
		goto done;

	/* no mtt memory in direct mode, so just return the buffer address */
	if (cfg->is_direct) {
		start_index = offset >> HNS_HW_PAGE_SHIFT;
		for (mtt_count = 0; mtt_count < cfg->region_count &&
		     total < mtt_max; mtt_count++) {
			npage = cfg->region[mtt_count].offset;
			if (npage < start_index)
				continue;

			addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				mtt_buf[total] = to_hr_hw_page_addr(addr);
			else
				mtt_buf[total] = addr;

			total++;
		}

		goto done;
	}

	start_index = offset >> cfg->buf_pg_shift;
	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  start_index + total,
						  &mtt_count, NULL);
		if (!mtts || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		left -= npage;
		for (mtt_count = 0; mtt_count < npage; mtt_count++)
			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
	}

done:
	if (base_addr)
		*base_addr = cfg->root_ba;

	return total;
}

static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
			    struct hns_roce_buf_attr *attr,
			    struct hns_roce_hem_cfg *cfg,
			    unsigned int *buf_page_shift)
{
	struct hns_roce_buf_region *r;
	unsigned int page_shift;
	int page_cnt = 0;
	size_t buf_size;
	int region_cnt;

	if (cfg->is_direct) {
		buf_size = cfg->buf_pg_count << cfg->buf_pg_shift;
		page_cnt = DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE);
		/*
		 * When the HEM buffer uses level-0 addressing, the page size
		 * equals the buffer size, and the page size = 4K * 2^N.
		 */
		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(page_cnt);
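		/*
		 * Worked example (illustrative): a 64 KB direct buffer gives
		 * page_cnt = 64K / 4K = 16, so
		 * buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(16)
		 *              = 12 + 4 = 16, i.e. one 64 KB "page".
		 */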
		if (attr->region_count > 1) {
			cfg->buf_pg_count = page_cnt;
			page_shift = HNS_HW_PAGE_SHIFT;
		} else {
			cfg->buf_pg_count = 1;
			page_shift = cfg->buf_pg_shift;
			if (buf_size != 1 << page_shift) {
				ibdev_err(&hr_dev->ib_dev,
					  "failed to check direct size %zu shift %d.\n",
					  buf_size, page_shift);
				return -EINVAL;
			}
		}
	} else {
		page_shift = cfg->buf_pg_shift;
	}

	/* convert buffer size to page index and page count */
	for (page_cnt = 0, region_cnt = 0; page_cnt < cfg->buf_pg_count &&
	     region_cnt < attr->region_count &&
	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
		r = &cfg->region[region_cnt];
		r->offset = page_cnt;
		buf_size = hr_hw_page_align(attr->region[region_cnt].size);
		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
		page_cnt += r->count;
		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
					     r->count);
	}

	if (region_cnt < 1) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to check mtr region count, pages = %d.\n",
			  cfg->buf_pg_count);
		return -ENOBUFS;
	}

	cfg->region_count = region_cnt;
	*buf_page_shift = page_shift;

	return page_cnt;
}
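
/*
 * Typical usage of hns_roce_mtr_create() (a sketch mirroring alloc_mr_pbl()
 * above, not an additional code path): the caller describes the buffer with
 * one region and lets the mtr allocate and map it.
 *
 *	struct hns_roce_buf_attr buf_attr = {};
 *
 *	buf_attr.page_shift = hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
 *	buf_attr.region[0].size = length;
 *	buf_attr.region[0].hopnum = hr_dev->caps.pbl_hop_num;
 *	buf_attr.region_count = 1;
 *	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
 *				  hr_dev->caps.pbl_ba_pg_sz + HNS_HW_PAGE_SHIFT,
 *				  udata, start);
 */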

/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
			unsigned long user_addr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned int buf_page_shift = 0;
	dma_addr_t *pages = NULL;
	int all_pg_cnt;
	int get_pg_cnt;
	int ret = 0;

	/* if mtt is disabled, all pages must be in a contiguous address range */
	cfg->is_direct = !mtr_has_mtt(buf_attr);

	/* if the buffer only needs mtt, just init the hem cfg */
	if (buf_attr->mtt_only) {
		cfg->buf_pg_shift = buf_attr->page_shift;
		cfg->buf_pg_count = mtr_bufs_size(buf_attr) >>
				    buf_attr->page_shift;
		mtr->umem = NULL;
		mtr->kmem = NULL;
	} else {
		ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, cfg->is_direct,
				     udata, user_addr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc mtr bufs, ret = %d.\n", ret);
			return ret;
		}
	}

	all_pg_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, cfg, &buf_page_shift);
	if (all_pg_cnt < 1) {
		ret = -ENOBUFS;
		ibdev_err(ibdev, "failed to init mtr buf cfg.\n");
		goto err_alloc_bufs;
	}

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);
		if (ret) {
			ibdev_err(ibdev, "failed to request mtr hem, ret = %d.\n",
				  ret);
			goto err_alloc_bufs;
		}
		cfg->root_ba = mtr->hem_list.root_ba;
		cfg->ba_pg_shift = ba_page_shift;
	} else {
		cfg->ba_pg_shift = cfg->buf_pg_shift;
	}

	/* no buffer to map */
	if (buf_attr->mtt_only)
		return 0;

	/* alloc a tmp array to store the buffer's dma addresses */
	pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "failed to alloc mtr page list %d.\n",
			  all_pg_cnt);
		goto err_alloc_hem_list;
	}

	get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
				   buf_page_shift);
	if (get_pg_cnt != all_pg_cnt) {
		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n",
			  get_pg_cnt, all_pg_cnt);
		ret = -ENOBUFS;
		goto err_alloc_page_list;
	}

	/* write the buffer's dma addresses to the BA table */
	ret = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
	if (ret) {
		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
		goto err_alloc_page_list;
	}

	/* drop tmp array */
	kvfree(pages);
	return 0;

err_alloc_page_list:
	kvfree(pages);
err_alloc_hem_list:
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
err_alloc_bufs:
	mtr_free_bufs(hr_dev, mtr);
	return ret;
}

void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release multi-hop addressing resource */
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	/* free buffers */
	mtr_free_bufs(hr_dev, mtr);
}