/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void
create_mkey_callback(int status, struct mlx5_async_work *context);
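/*
 * Fill the mkey context fields shared by all registration paths below:
 * the translation-independent access rights, the PD and the starting
 * virtual address. Relaxed ordering is only requested when the device
 * reports the corresponding capability bits.
 */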
static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);

	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
		MLX5_SET(mkc, mkc, relaxed_ordering_write,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
		MLX5_SET(mkc, mkc, relaxed_ordering_read,
			 !!(acc & IB_ACCESS_RELAXED_ORDERING));

	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);
}

static void
assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
		    u32 *in)
{
	u8 key = atomic_inc_return(&dev->mkey_var);
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, mkey_7_0, key);
	mkey->key = key;
}
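/*
 * The low 8 bits of an mkey (mkey_7_0) are a rolling per-device "variant"
 * taken from dev->mkey_var; the upper 24 bits are the index assigned by
 * firmware and are combined with the variant once the CREATE_MKEY command
 * completes (see create_mkey_callback() for the asynchronous path).
 */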
static int
mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
		    u32 *in, int inlen)
{
	assign_mkey_variant(dev, mkey, in);
	return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
}

static int
mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
		       struct mlx5_core_mkey *mkey,
		       struct mlx5_async_ctx *async_ctx,
		       u32 *in, int inlen, u32 *out, int outlen,
		       struct mlx5_async_work *context)
{
	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	assign_mkey_variant(dev, mkey, in);
	return mlx5_cmd_exec_cb(async_ctx, in, inlen, out, outlen,
				create_mkey_callback, context);
}

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

	return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
}

static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start,
					  u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
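/*
 * Completion handler for the asynchronous CREATE_MKEY commands issued by
 * add_keys(). On failure the cache is throttled by setting fill_delay and
 * arming delay_timer; on success the firmware-assigned mkey index is folded
 * into mmkey.key and the new MR is added to its cache bucket.
 */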
static void create_mkey_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_cache_ent *ent = mr->cache_ent;
	unsigned long flags;

	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		spin_lock_irqsave(&ent->lock, flags);
		ent->pending--;
		WRITE_ONCE(dev->fill_delay, 1);
		spin_unlock_irqrestore(&ent->lock, flags);
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->mmkey.key |= mlx5_idx_to_mkey(
		MLX5_GET(create_mkey_out, mr->out, mkey_index));

	WRITE_ONCE(dev->cache.last_add, jiffies);

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	ent->total_mrs++;
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
}
static struct mlx5_ib_mr *alloc_cache_mr(struct mlx5_cache_ent *ent, void *mkc)
{
	struct mlx5_ib_mr *mr;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return NULL;
	mr->order = ent->order;
	mr->cache_ent = ent;
	mr->dev = ent->dev;

	set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);

	MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
	MLX5_SET(mkc, mkc, log_page_size, ent->page);
	return mr;
}
/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		mr = alloc_cache_mr(ent, mkc);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		spin_lock_irq(&ent->lock);
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			spin_unlock_irq(&ent->lock);
			kfree(mr);
			break;
		}
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
					     &ent->dev->async_ctx, in, inlen,
					     mr->out, sizeof(mr->out),
					     &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
/* Synchronously create a MR in the cache */
static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
{
	size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return ERR_PTR(-ENOMEM);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	mr = alloc_cache_mr(ent, mkc);
	if (!mr) {
		err = -ENOMEM;
		goto free_in;
	}

	err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto free_mr;

	mr->mmkey.type = MLX5_MKEY_MR;
	WRITE_ONCE(ent->dev->cache.last_add, jiffies);
	spin_lock_irq(&ent->lock);
	ent->total_mrs++;
	spin_unlock_irq(&ent->lock);
	kfree(in);
	return mr;
free_mr:
	kfree(mr);
free_in:
	kfree(in);
	return ERR_PTR(err);
}
static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_mr *mr;

	lockdep_assert_held(&ent->lock);
	if (list_empty(&ent->head))
		return;
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	list_del(&mr->list);
	ent->available_mrs--;
	ent->total_mrs--;
	spin_unlock_irq(&ent->lock);
	mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
	kfree(mr);
	spin_lock_irq(&ent->lock);
}
static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
{
	int err;

	lockdep_assert_held(&ent->lock);

	while (true) {
		if (limit_fill)
			target = ent->limit * 2;
		if (target == ent->available_mrs + ent->pending)
			return 0;
		if (target > ent->available_mrs + ent->pending) {
			u32 todo = target - (ent->available_mrs + ent->pending);

			spin_unlock_irq(&ent->lock);
			err = add_keys(ent, todo);
			if (err == -EAGAIN)
				usleep_range(3000, 5000);
			spin_lock_irq(&ent->lock);
			if (err) {
				if (err != -EAGAIN)
					return err;
			} else {
				return 0;
			}
		} else {
			remove_cache_mr_locked(ent);
		}
	}
}
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	unsigned int target;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &target);
	if (err)
		return err;

	/*
	 * Target is the new value of total_mrs the user requests; however, we
	 * cannot free MRs that are in use. Compute the target value for
	 * available_mrs.
	 */
	spin_lock_irq(&ent->lock);
	if (target < ent->total_mrs - ent->available_mrs) {
		err = -EINVAL;
		goto err_unlock;
	}
	target = target - (ent->total_mrs - ent->available_mrs);
	if (target < ent->limit || target > ent->limit*2) {
		err = -EINVAL;
		goto err_unlock;
	}
	err = resize_available_mrs(ent, target, false);
	if (err)
		goto err_unlock;
	spin_unlock_irq(&ent->lock);

	return count;

err_unlock:
	spin_unlock_irq(&ent->lock);
	return err;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->total_mrs);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	u32 var;
	int err;

	err = kstrtou32_from_user(buf, count, 0, &var);
	if (err)
		return err;

	/*
	 * Upon set we immediately fill the cache to the high water mark
	 * implied by the limit.
	 */
	spin_lock_irq(&ent->lock);
	ent->limit = var;
	err = resize_available_mrs(ent, 0, true);
	spin_unlock_irq(&ent->lock);
	if (err)
		return err;
	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};
static bool someone_adding(struct mlx5_mr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &cache->ent[i];
		bool ret;

		spin_lock_irq(&ent->lock);
		ret = ent->available_mrs < ent->limit;
		spin_unlock_irq(&ent->lock);
		if (ret)
			return true;
	}
	return false;
}
/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis: once the low water mark is hit, it
 * is refilled up to the high mark.
 */
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
	lockdep_assert_held(&ent->lock);

	if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
		return;
	if (ent->available_mrs < ent->limit) {
		ent->fill_to_high_water = true;
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->fill_to_high_water &&
		   ent->available_mrs + ent->pending < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->available_mrs == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->pending)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			queue_work(ent->dev->cache.wq, &ent->work);
	}
}
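/*
 * Example of the hysteresis above: with ent->limit == 16, a bucket is
 * refilled once available_mrs drops below 16 and keeps being refilled until
 * available_mrs + pending reaches 32 (2 * limit); shrinking work is only
 * queued once available_mrs exceeds 32.
 */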
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int err;

	spin_lock_irq(&ent->lock);
	if (ent->disabled)
		goto out;

	if (ent->fill_to_high_water &&
	    ent->available_mrs + ent->pending < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		spin_unlock_irq(&ent->lock);
		err = add_keys(ent, 1);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (err) {
			/*
			 * EAGAIN only happens if pending is positive, so we
			 * will be rescheduled from reg_mr_callback(). The only
			 * failure path here is ENOMEM.
			 */
			if (err != -EAGAIN) {
				mlx5_ib_warn(
					dev,
					"command failed order %d, err %d\n",
					ent->order, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			}
		}
	} else if (ent->available_mrs > 2 * ent->limit) {
		bool need_delay;

		/*
		 * The remove_cache_mr() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_cache_mr() and
		 * postpone the garbage collection work to the next cycle, in
		 * order to free CPU resources for other tasks.
		 */
		spin_unlock_irq(&ent->lock);
		need_delay = need_resched() || someone_adding(cache) ||
			     time_after(jiffies,
					READ_ONCE(cache->last_add) + 300 * HZ);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
		if (need_delay)
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		remove_cache_mr_locked(ent);
		queue_adjust_cache_locked(ent);
	}
out:
	spin_unlock_irq(&ent->lock);
}
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
/* Allocate a special entry from the cache */
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       unsigned int entry, int access_flags)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;

	if (WARN_ON(entry <= MR_CACHE_LAST_STD_ENTRY ||
		    entry >= ARRAY_SIZE(cache->ent)))
		return ERR_PTR(-EINVAL);

	/* Matches access in alloc_cache_mr() */
	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
		return ERR_PTR(-EOPNOTSUPP);

	ent = &cache->ent[entry];
	spin_lock_irq(&ent->lock);
	if (list_empty(&ent->head)) {
		queue_adjust_cache_locked(ent);
		ent->miss++;
		spin_unlock_irq(&ent->lock);
		mr = create_cache_mr(ent);
		if (IS_ERR(mr))
			return mr;
	} else {
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->available_mrs--;
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}
	mr->access_flags = access_flags;
	return mr;
}
/* Return a MR already available in the cache */
static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
	struct mlx5_ib_dev *dev = req_ent->dev;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent = req_ent;

	/* Try larger MR pools from the cache to satisfy the allocation */
	for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
		mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
			    ent - dev->cache.ent);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->available_mrs--;
			queue_adjust_cache_locked(ent);
			spin_unlock_irq(&ent->lock);
			break;
		}
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	if (!mr)
		req_ent->miss++;

	return mr;
}
static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
{
	struct mlx5_cache_ent *ent = mr->cache_ent;

	mr->cache_ent = NULL;
	spin_lock_irq(&ent->lock);
	ent->total_mrs--;
	spin_unlock_irq(&ent->lock);
}

void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_cache_ent *ent = mr->cache_ent;

	if (!ent)
		return;

	if (mlx5_mr_cache_invalidate(mr)) {
		detach_mr_from_cache(mr);
		destroy_mkey(dev, mr);
		kfree(mr);
		return;
	}

	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	queue_adjust_cache_locked(ent);
	spin_unlock_irq(&ent->lock);
}
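/*
 * Destroy all mkeys queued in bucket @c; only used from
 * mlx5_mr_cache_cleanup() after the bucket has been disabled.
 */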
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->available_mrs--;
		ent->total_mrs--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}

static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->available_mrs);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}

static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	WRITE_ONCE(dev->fill_delay, 0);
}
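/*
 * Cache entry i holds MRs of order i + 2, i.e. translation tables covering
 * 2^(i + 2) pages. ent->xlt is that table size in 16-byte octwords; for
 * example, order 5 means 32 MTT entries of 8 bytes = 256 bytes = 16 octwords.
 */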
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
		    mlx5_ib_can_load_pas_with_umr(dev, 0))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		spin_lock_irq(&ent->lock);
		queue_adjust_cache_locked(ent);
		spin_unlock_irq(&ent->lock);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	if (!dev->cache.wq)
		return 0;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		struct mlx5_cache_ent *ent = &dev->cache.ent[i];

		spin_lock_irq(&ent->lock);
		ent->disabled = true;
		spin_unlock_irq(&ent->lock);
		cancel_work_sync(&ent->work);
		cancel_delayed_work_sync(&ent->dwork);
	}

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	set_mkc_access_pd_addr_fields(mkc, acc, 0, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
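/*
 * Translation entries are packed two 8-byte MTTs per 16-byte octword, hence
 * the (npages + 1) / 2 below.
 */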
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}

static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}
static int mr_umem_get(struct mlx5_ib_dev *dev, u64 start, u64 length,
		       int access_flags, struct ib_umem **umem, int *npages,
		       int *page_shift, int *ncont, int *order)
{
	struct ib_umem *u;

	*umem = NULL;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_umem_odp *odp;

		odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
				      &mlx5_mn_ops);
		if (IS_ERR(odp)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
				    PTR_ERR(odp));
			return PTR_ERR(odp);
		}

		u = &odp->umem;

		*page_shift = odp->page_shift;
		*ncont = ib_umem_odp_num_pages(odp);
		*npages = *ncont << (*page_shift - PAGE_SHIFT);
		if (order)
			*order = ilog2(roundup_pow_of_two(*ncont));
	} else {
		u = ib_umem_get(&dev->ib_dev, start, length, access_flags);
		if (IS_ERR(u)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
			return PTR_ERR(u);
		}

		mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
				   page_shift, ncont, order);
	}

	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}
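/*
 * Map a page order to a cache bucket: ent[0] holds order-2 MRs, so the
 * rebased index is order - ent[0].order, clamped to the first bucket for
 * smaller orders and NULL once it exceeds MR_CACHE_LAST_STD_ENTRY.
 */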
static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
						      unsigned int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return &cache->ent[0];
	order = order - cache->ent[0].order;
	if (order > MR_CACHE_LAST_STD_ENTRY)
		return NULL;
	return &cache->ent[order];
}

static struct mlx5_ib_mr *
alloc_mr_from_cache(struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr,
		    u64 len, int npages, int page_shift, unsigned int order,
		    int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_cache_ent *ent = mr_cache_ent_from_order(dev, order);
	struct mlx5_ib_mr *mr;

	if (!ent)
		return ERR_PTR(-E2BIG);

	/* Matches access in alloc_cache_mr() */
	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
		return ERR_PTR(-EOPNOTSUPP);

	mr = get_cache_mr(ent);
	if (!mr) {
		mr = create_cache_mr(ent);
		if (IS_ERR(mr))
			return mr;
	}

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}

#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
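/*
 * mlx5_ib_update_xlt() below tries to allocate one buffer of up to
 * MLX5_MAX_UMR_CHUNK bytes for the translation table, falls back to a
 * MLX5_SPARE_UMR_CHUNK (64KB) allocation, and as a last resort borrows the
 * driver-wide XLT emergency page, posting as many UMR WQEs as needed to
 * cover the whole range chunk by chunk.
 */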
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			? sizeof(struct mlx5_klm)
			: sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	size_t size_to_map = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EOPNOTSUPP;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	if (mr->umem->is_odp) {
		if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
			struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
			size_t max_pages = ib_umem_odp_num_pages(odp) - idx;

			pages_to_map = min_t(size_t, pages_to_map, max_pages);
		}
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		size_to_map = npages * desc_size;
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		if (mr->umem->is_odp) {
			mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
		} else {
			__mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
					       npages, xlt,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages
			 * brought from the umem.
			 */
			memset(xlt + size_to_map, 0, size - size_to_map);
		}
		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate) {
		if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
			err = -EINVAL;
			goto err_2;
		}
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);
	}

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	set_mkc_access_pd_addr_fields(mkc, access_flags, virt_addr,
				      populate ? pd : dev->umrc.pd);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  u64 length, int access_flags)
{
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET64(mkc, mkc, len, length);
	set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);

	set_mr_fields(dev, mr, length, acc);

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool xlt_with_umr;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, length);
	/* ODP requires xlt update via umr to work. */
	if (!xlt_with_umr && (access_flags & IB_ACCESS_ON_DEMAND))
		return ERR_PTR(-EINVAL);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
	    length == U64_MAX) {
		if (virt_addr != start)
			return ERR_PTR(-EINVAL);
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	err = mr_umem_get(dev, start, length, access_flags, &umem,
			  &npages, &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	if (xlt_with_umr) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (IS_ERR(mr))
			mr = NULL;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !xlt_with_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	mr->npages = npages;
	atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
	set_mr_fields(dev, mr, length, access_flags);

	if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
		/*
		 * If the MR was created with reg_create then it will be
		 * configured properly but left disabled. It is safe to go ahead
		 * and configure it again via UMR while enabling it.
		 */
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	if (is_odp_mr(mr)) {
		to_ib_umem_odp(mr->umem)->private = mr;
		init_waitqueue_head(&mr->q_deferred_work);
		atomic_set(&mr->num_deferred_work, 0);
		err = xa_err(xa_store(&dev->odp_mkeys,
				      mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
				      GFP_KERNEL));
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}

		err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
/**
 * mlx5_mr_cache_invalidate - Fence all DMA on the MR
 * @mr: The MR to fence
 *
 * Upon return the NIC will not be doing any DMA to the pages under the MR,
 * and any DMA in progress will be completed. Failure of this function
 * indicates the HW has failed catastrophically.
 */
int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
{
	struct mlx5_umr_wr umrwr = {};

	if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.pd = mr->dev->umrc.pd;
	umrwr.mkey = mr->mmkey.key;
	umrwr.ignore_free_state = 1;

	return mlx5_ib_post_send_wait(mr->dev, &umrwr);
}
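/*
 * Update the PD and/or access flags of a live mkey in place with a UMR WQE;
 * the translation itself is left untouched.
 */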
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (!mr->umem)
		return -EINVAL;

	if (is_odp_mr(mr))
		return -EOPNOTSUPP;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		mr->npages = 0;
		ib_umem_release(mr->umem);
		mr->umem = NULL;

		err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
				  &npages, &page_shift, &ncont, &order);
		if (err)
			goto err;
		mr->npages = ncont;
		atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
	}

	if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
					   access_flags) ||
	    !mlx5_ib_can_load_pas_with_umr(dev, len) ||
	    (flags & IB_MR_REREG_TRANS &&
	     !mlx5_ib_pas_fits_in_mr(mr, addr, len))) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->cache_ent)
			detach_mr_from_cache(mr);
		err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, len, access_flags);

	return 0;

err:
	ib_umem_release(mr->umem);
	mr->umem = NULL;

	clean_mr(dev, mr);
	return err;
}
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
		kfree(mr->sig);
		mr->sig = NULL;
	}

	if (!mr->cache_ent) {
		destroy_mkey(dev, mr);
		mlx5_free_priv_descs(mr);
	}
}

static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

	/* Stop all DMA */
	if (is_odp_mr(mr))
		mlx5_ib_fence_odp_mr(mr);
	else
		clean_mr(dev, mr);

	if (mr->cache_ent)
		mlx5_mr_cache_free(dev, mr);
	else
		kfree(mr);

	ib_umem_release(umem);
	atomic_sub(npages, &dev->mdev->priv.reg_pages);
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
		dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
	}

	if (is_odp_mr(mmr) && to_ib_umem_odp(mmr->umem)->is_implicit_odp) {
		mlx5_ib_free_implicit_mr(mmr);
		return 0;
	}

	dereg_mr(to_mdev(ibmr->device), mmr);

	return 0;
}
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	/* This is only used from the kernel, so setting the PD is OK. */
	set_mkc_access_pd_addr_fields(mkc, 0, 0, pd);
	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}

static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}
static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
			      mr->sig, GFP_KERNEL));
	if (err)
		goto err_free_descs;
	return 0;

err_free_descs:
	destroy_mkey(dev, mr);
	mlx5_free_priv_descs(mr);
err_free_mtt_mr:
	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);

	return err;
}
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}
int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = to_mmw(ibmw);
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return err;

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return -EOPNOTSUPP;

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return -EOPNOTSUPP;

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	ibmw->rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length =
		min(offsetofend(typeof(resp), response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err)
			goto free_mkey;
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		err = xa_err(xa_store(&dev->odp_mkeys,
				      mlx5_base_mkey(mw->mmkey.key), &mw->mmkey,
				      GFP_KERNEL));
		if (err)
			goto free_mkey;
	}

	kfree(in);
	return 0;

free_mkey:
	mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
free:
	kfree(in);
	return err;
}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_dev *dev = to_mdev(mw->device);
	struct mlx5_ib_mw *mmw = to_mmw(mw);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key));
		/*
		 * pagefault_single_data_segment() may be accessing mmw under
		 * SRCU if the user bound an ODP MR to this MW.
		 */
		synchronize_srcu(&dev->odp_srcu);
	}

	return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * PI address for the HW is the offset of the metadata address
		 * relative to the first data page address.
		 * It equals the first data page address + the size of the data
		 * pages + the metadata offset within the first metadata page.
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we register
		 * also the gaps between the end of the data and the start of
		 * the metadata (the sig MR will verify that the HW accesses the
		 * right addresses). This mapping is safe because we use an
		 * internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fallback to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
	 * descriptors and fallback to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially in high load).
	 * Use KLM (indirect access) only if it's mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is zero-based memory region */
	if (pi_mr) {
		ibmr->iova = 0;
		mr->pi_mr = pi_mr;
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	} else {
		ibmr->sig_attrs->meta_length = mr->meta_length;
	}

	return 0;
}
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}