/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

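/*
 * SW2HW_MPT hands a memory protection table (MPT) entry over to hardware
 * via a mailbox command; HW2SW_MPT reclaims it.  For HW2SW the mailbox is
 * optional: passing NULL invalidates the entry without reading the MPT
 * context back.
 */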
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

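/*
 * MTT segments are handed out by a binary buddy allocator: bits[o] is a
 * bitmap of free blocks of 2^o segments and num_free[o] counts them.
 * Allocation takes the smallest free block of at least the requested
 * order and splits it down; freeing coalesces a block with its buddy as
 * long as the buddy is also free.
 */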
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}

	spin_unlock(&buddy->lock);
	return -1;

found:
	/* Claim the block, then split it down to the requested order */
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}

static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	/* Merge with the buddy block as long as it is also free */
	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(s * sizeof(long));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	/* Initially the whole range is one free block of maximum order */
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
		return -1;
	}

	return 0;
}

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret;
	int i;

	/* A page count of zero corresponds to a DMA memory registration */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: if page_shift is zero, this is a fast memory registration */
	mtt->page_shift = page_shift;

	/* Compute the number of MTT segments needed, as a power-of-two order */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate MTT entry */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
	hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
				 mtt->first_seg + (1 << mtt->order) - 1);
}

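/*
 * Reserve an MTPT index for the MR, derive its key, and fill in the
 * software MR state.  A DMA MR (size == ~0ULL) has no page list; a normal
 * MR gets a physical buffer list (PBL) of npages 64-bit page addresses,
 * allocated DMA-coherently so hardware can walk it.
 */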
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	unsigned long index = 0;
	int ret;
	struct device *dev = &hr_dev->pdev->dev;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* PD the MR belongs to */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ULL) {
		mr->type = MR_TYPE_DMA;
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
	} else {
		mr->type = MR_TYPE_MR;
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;
	}

	return 0;
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	if (ret) {
		dev_err(dev, "Write mtpt failed (%d)!\n", ret);
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

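/*
 * Write one chunk of page addresses into the MTT.  A chunk must not cross
 * a page boundary inside the MTT table and must start on an MTT segment
 * boundary, so hns_roce_write_mtt() below splits larger updates into
 * suitably sized chunks.
 */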
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	u32 i = 0;
	__le64 *mtts = NULL;
	dma_addr_t dma_handle;
	u32 s = start_index * sizeof(u64);

	/* All MTTs must fit in the same page */
	if (start_index / (PAGE_SIZE / sizeof(u64)) !=
	    (start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
				mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
				&dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Store the page addresses with the low 12 bits shifted out */
	for (i = 0; i < npages; ++i)
		mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;

	return 0;
}

static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;

	if (mtt->order < 0)
		return -EINVAL;

	while (npages > 0) {
		/* Never write more than one MTT page worth of entries at once */
		chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

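/*
 * Build a flat array of kernel buffer page addresses (direct mapping or
 * per-page list, depending on how the buffer was allocated) and write it
 * into the MTT in one pass.
 */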
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u32 i = 0;
	int ret = 0;
	u64 *page_list = NULL;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}
	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	return 0;

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

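/*
 * ib_get_dma_mr() verb: register an MR covering the whole DMA address
 * space.  The magic size ~0ULL marks it as a DMA MR, so no page list is
 * built and no umem is attached.
 */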
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	int ret;
	struct hns_roce_mr *mr = NULL;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

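/*
 * Walk the pinned umem scatterlist and write every page address into the
 * MTT.  Addresses are staged in a scratch page and flushed to the MTT a
 * PAGE_SIZE worth of entries at a time.
 */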
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i, k, entry;
	int ret = 0;
	u64 *pages;
	u32 n;
	int len;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> mtt->page_shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = sg_dma_address(sg) +
				     (k << umem->page_shift);
			/* Flush once the scratch page is full */
			if (i == PAGE_SIZE / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_page((unsigned long) pages);
	return ret;
}

static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	int i = 0;
	int entry;
	struct scatterlist *sg;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		/* PBL entries hold the page frame number (addr >> 12) */
		mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
		i++;
	}

	/* Memory barrier: make sure the PBL is written before the MPT is enabled */
	mb();

	return 0;
}

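/*
 * ib_reg_user_mr() verb: pin the user buffer with ib_umem_get(), check
 * that it uses 4K pages and fits in a single PBL, then allocate the MR,
 * fill its PBL from the umem and enable the MPT entry in hardware.
 */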
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr *mr = NULL;
	int ret;
	int n;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);
	if (mr->umem->page_shift != HNS_ROCE_HEM_PAGE_SHIFT) {
		dev_err(dev, "Only 4K page size is supported, but page size is 0x%lx!\n",
			BIT(mr->umem->page_shift));
		ret = -EINVAL;
		goto err_umem;
	}

	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
		dev_err(dev, "MR len %lld error: an MR is limited to 4G at most!\n",
			length);
		ret = -EINVAL;
		goto err_umem;
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

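/*
 * ib_dereg_mr() verb: use the hardware-specific dereg hook when one is
 * provided, otherwise fall back to the generic teardown (invalidate the
 * MPT, free the PBL and key, release the umem).
 */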
int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		if (mr->umem)
			ib_umem_release(mr->umem);

		kfree(mr);
	}

	return ret;
}