/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};
#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);
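/*
 * Destroy the HW mkey behind an MR.  With on-demand paging enabled, also
 * wait out an SRCU grace period so no page-fault handler can still be
 * using the MR once it is gone.
 */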
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif
	return err;
}
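/*
 * Map an MR order to its index in the cache entry array: entry 0 holds the
 * smallest cached order, larger orders are offsets from it.  Callers still
 * range-check the result against MAX_MR_CACHE_ENTRIES.
 */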
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	return order - cache->ent[0].order;
}
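/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys().  On success the fresh MR is appended to its cache entry and
 * the mkey is inserted into the device-wide radix tree; on failure the
 * delay timer is armed so further cache fills back off for a while.
 */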
static void reg_mr_callback(int status, void *context)
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;

	spin_lock_irqsave(&ent->lock, flags);
	spin_unlock_irqrestore(&ent->lock, flags);
	mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
	mod_timer(&dev->delay_timer, jiffies + HZ);

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     be32_to_cpu(mr->out.hdr.syndrome));
		mod_timer(&dev->delay_timer, jiffies + HZ);

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
		pr_err("Error inserting into mr tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
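/*
 * Grow cache entry 'c' by asynchronously creating 'num' free mkeys of that
 * entry's order, capped at MAX_PENDING_REG_MR outstanding commands per
 * entry; completions are handled in reg_mr_callback().
 */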
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;

	in = kzalloc(sizeof(*in), GFP_KERNEL);

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		mr->order = ent->order;

		in->seg.status = MLX5_MKEY_STATUS_FREE;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
			spin_lock_irq(&ent->lock);
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
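/*
 * Shrink cache entry 'c' by popping up to 'num' MRs from its free list and
 * destroying their mkeys.
 */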
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		mlx5_ib_warn(dev, "failed to destroy mkey\n");
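/*
 * debugfs knobs exposed per cache entry: "size" and "limit" are writable,
 * "cur" and "miss" are counters.  For example, assuming debugfs is mounted
 * at /sys/kernel/debug (the exact directory layout under dbg_root is
 * illustrative):
 *
 *	cat /sys/kernel/debug/mlx5/<device>/mr_cache/<order>/size
 *	echo 64 > /sys/kernel/debug/mlx5/<device>/mr_cache/<order>/size
 */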
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)

	if (var < ent->limit)

	if (var > ent->size) {
		err = add_keys(dev, c, var - ent->size);
		if (err && err != -EAGAIN)
		usleep_range(3000, 5000);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
	struct mlx5_cache_ent *ent = filp->private_data;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);

	if (copy_to_user(buf, lbuf, err))

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
	struct mlx5_cache_ent *ent = filp->private_data;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);

	if (copy_to_user(buf, lbuf, err))

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.write	= limit_write,

static int someone_adding(struct mlx5_mr_cache *cache)
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
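/*
 * Per-entry maintenance work: fill an entry up to twice its limit (backing
 * off on EAGAIN or while dev->fill_delay is set), and lazily shrink it once
 * it stays above that watermark, as explained in the garbage-collection
 * comment below.
 */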
static void __cache_work_func(struct mlx5_cache_ent *ent)
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
				queue_work(cache->wq, &ent->work);
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage-collection
		 * task.  Such a task is intended to run when no other active
		 * processes are running.
		 *
		 * need_resched() returns true if there are user tasks to be
		 * activated in the near future.
		 *
		 * In that case, we don't execute remove_keys() and postpone
		 * the garbage-collection work to the next cycle, in order to
		 * free CPU resources for other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);

static void delayed_cache_work_func(struct work_struct *work)
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);

static void cache_work_func(struct work_struct *work)
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
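/*
 * Take an MR from the cache for the requested order, falling back to larger
 * orders when the exact entry is empty.  A miss bumps the entry's miss
 * counter, and entries that drop below their limit get their work queued so
 * the cache refills in the background.
 */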
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);

	cache->ent[c].miss++;

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	if (ent->cur > 2 * ent->limit)
	spin_unlock_irq(&ent->lock);

	queue_work(cache->wq, &ent->work);
static void clean_keys(struct mlx5_ib_dev *dev, int c)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;

	cancel_delayed_work(&ent->dwork);
	spin_lock_irq(&ent->lock);
	if (list_empty(&ent->head)) {
		spin_unlock_irq(&ent->lock);
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	spin_unlock_irq(&ent->lock);
	err = destroy_mkey(dev, mr);
	mlx5_ib_warn(dev, "failed to destroy mkey\n");
static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;

	if (!mlx5_debugfs_root)

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
	if (!mlx5_debugfs_root)

	debugfs_remove_recursive(dev->cache.root);
static void delay_time_func(unsigned long ctx)
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
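/*
 * Set up the MR cache: a single-threaded workqueue, a delay timer used to
 * throttle refills after firmware errors, and one entry per supported order,
 * each with its own list, lock and (delayed) work item.  Entries are
 * pre-filled by queuing their work at the end of the loop.
 */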
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;

	cache->wq = create_singlethread_workqueue("mkey_cache");
		mlx5_ib_warn(dev, "failed to create work queue\n");

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		queue_work(cache->wq, &ent->work);

	err = mlx5_mr_cache_debugfs_init(dev);
		mlx5_ib_warn(dev, "cache debugfs failure\n");

static void wait_for_async_commands(struct mlx5_ib_dev *dev)
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		for (j = 0; j < 1000; j++) {

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		total += ent->pending;

		mlx5_ib_warn(dev, "aborted while there are %d pending mr requests\n", total);
		mlx5_ib_warn(dev, "done with all pending requests\n");

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)

	destroy_workqueue(dev->cache.wq);
	wait_for_async_commands(dev);
	del_timer_sync(&dev->delay_timer);
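/*
 * Register a DMA MR covering all of memory: an mkey in PA access mode with
 * MLX5_MKEY_LEN64 set, so no page list (translation) is needed.
 */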
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);

	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
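/*
 * An MTT entry is 8 bytes, but the HCA counts translation entries in 16-byte
 * octowords, hence the (npages + 1) / 2 below.
 */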
static int get_octo_len(u64 addr, u64 len, int page_size)
	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;

static int use_umr(int order)
	return order <= MLX5_MAX_UMR_SHIFT;

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->page_shift = page_shift;
	umrwr->target.virt_addr = virt_addr;
	umrwr->access_flags = access_flags;

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;

void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
	struct mlx5_ib_umr_context *context;

	err = ib_poll_cq(cq, 1, &wc);
		pr_warn("poll cq error %d\n", err);

	context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
	context->status = wc.status;
	complete(&context->done);

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
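/*
 * Fast-path registration through UMR: take a pre-created mkey from the
 * cache (topping the cache up on a miss), DMA-map the page list, post a UMR
 * work request on the dedicated QP and sleep until its completion is
 * reported by mlx5_umr_cq_handler().
 */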
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);

		return ERR_PTR(-EAGAIN);

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more. */
	size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);

	pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, size - npages * sizeof(u64));

	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {

	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
			 page_shift, virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);

	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);

	wait_for_completion(&umr_context.done);
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "reg umr failed\n");

	mr->mmr.iova = virt_addr;
	mr->mmr.pd = to_mpd(pd)->pdn;

	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	free_cached_mr(dev, mr);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
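/*
 * ODP helper: rewrite a range of MTT entries of an existing MR via UMR with
 * the UPDATE_MTT flag.  The page list is built in a zeroed page allocated
 * with GFP_ATOMIC (falling back to the static emergency buffer above) and
 * pushed to the device in MLX5_UMR_MTT_ALIGNMENT-sized chunks.
 */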
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);

	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       ib_umem_num_pages(umem) - start_page_index);

		__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
				       start_page_index, npages, pas,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages brought from the
		 * umem. */
		memset(pas + npages, 0, size - npages * sizeof(u64));

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_id = (u64)(unsigned long)&umr_context;

		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmr.key;
		wr.target.offset = start_page_index;

		mlx5_ib_init_umr_context(&umr_context);

		err = ib_post_send(umrc->qp, &wr.wr, &bad);
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);

		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_err(dev, "UMR completion failed, code %d\n",

	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	if (!use_emergency_buf)
		free_page((unsigned long)pas);

		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
				     u64 length, struct ib_umem *umem,
				     int npages, int page_shift,
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);

	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
	 * in the page list submitted with the command. */
	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
		mlx5_ib_warn(dev, "create mkey failed\n");

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

	return ERR_PTR(err);
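/*
 * ib_reg_user_mr() entry point: pin the user memory with ib_umem_get(),
 * work out the best page size for the region, then register it either via
 * the UMR fast path (reg_umr) for small enough orders or via a regular
 * create_mkey command (reg_create).
 */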
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
		mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;

	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
		mlx5_ib_warn(dev, "avoid zero region\n");

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    npages, ncont, order, page_shift);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");

	mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR before reg_umr has finished, to ensure that the MR
		 * initialization has finished before we start handling
		 * invalidations.
		 */
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */

	ib_umem_release(umem);
	return ERR_PTR(err);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr = {};
	struct ib_send_wr *bad;

	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);

	mlx5_ib_init_umr_context(&umr_context);

	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
		mlx5_ib_dbg(dev, "err %d\n", err);

	wait_for_completion(&umr_context.done);

	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
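/*
 * Allocate the driver-private descriptor array used by mlx5_ib_alloc_mr()
 * for IB_MR_TYPE_MEM_REG.  The array handed to the hardware must be
 * MLX5_UMR_ALIGN (2K) aligned, so we over-allocate, keep the original
 * pointer in descs_alloc for kfree(), and DMA-map the aligned portion.
 */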
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
	int size = ndescs * desc_size;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
		kfree(mr->descs_alloc);

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
	struct ib_device *device = mr->ibmr.device;
	int size = mr->max_descs * mr->desc_size;

	dma_unmap_single(device->dma_device, mr->desc_map,
			 size, DMA_TO_DEVICE);
	kfree(mr->descs_alloc);
static int clean_mr(struct mlx5_ib_mr *mr)
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;

	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);

	mlx5_free_priv_descs(mr);

	err = destroy_mkey(dev, mr);
		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",

	err = unreg_umr(dev, mr);
		mlx5_ib_warn(dev, "failed to unregister mr\n");

	free_cached_mr(dev, mr);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */

	ib_umem_release(umem);
	atomic_sub(npages, &dev->mdev->priv.reg_pages);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(max_num_sg, 4);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);

	in->seg.status = MLX5_MKEY_STATUS_FREE;
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		access_mode = MLX5_ACCESS_MODE_MTT;
		in->seg.log2_page_size = PAGE_SHIFT;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));

		mr->desc_size = sizeof(u64);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;

		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);

	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev,
				  mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);

	mlx5_free_priv_descs(mr);

	return ERR_PTR(err);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
			pr_err("signature status check requested on a non-signature enabled MR\n");

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
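/*
 * Page-list callback for ib_sg_to_pages(): store one 64-bit address in the
 * private descriptor array, tagged with the MLX5_EN_RD/MLX5_EN_WR enable
 * bits expected in an MTT entry.
 */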
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	if (unlikely(mr->ndescs == mr->max_descs))

	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,

	n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,