/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2016 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "mmu_rb.h"
#include "trace.h"
static unsigned long mmu_node_start(struct mmu_rb_node *);
static unsigned long mmu_node_last(struct mmu_rb_node *);
static int mmu_notifier_range_start(struct mmu_notifier *,
                                    const struct mmu_notifier_range *);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
                                           unsigned long, unsigned long);
static void release_immediate(struct kref *refcount);
static void handle_remove(struct work_struct *work);
static const struct mmu_notifier_ops mn_opts = {
        .invalidate_range_start = mmu_notifier_range_start,
};
INTERVAL_TREE_DEFINE(struct mmu_rb_node, node, unsigned long, __last,
                     mmu_node_start, mmu_node_last, static, __mmu_int_rb);
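/*
 * INTERVAL_TREE_DEFINE() above generates the static helpers used below:
 * __mmu_int_rb_insert(), __mmu_int_rb_remove(), __mmu_int_rb_iter_first()
 * and __mmu_int_rb_iter_next(), all operating on the inclusive interval
 * [mmu_node_start(node), mmu_node_last(node)].
 */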
/* Interval start: the page-aligned base of the node's mapping. */
static unsigned long mmu_node_start(struct mmu_rb_node *node)
{
        return node->addr & PAGE_MASK;
}

/* Interval end: the last byte of the node's page-aligned extent. */
static unsigned long mmu_node_last(struct mmu_rb_node *node)
{
        return PAGE_ALIGN(node->addr + node->len) - 1;
}
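/*
 * Allocate a handler and register an MMU notifier on the current task's
 * mm.  Returns 0 on success, -ENOMEM on allocation failure, or the
 * mmu_notifier_register() error code.
 */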
int hfi1_mmu_rb_register(void *ops_arg,
                         struct mmu_rb_ops *ops,
                         struct workqueue_struct *wq,
                         struct mmu_rb_handler **handler)
{
        struct mmu_rb_handler *h;
        int ret;

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return -ENOMEM;

        h->root = RB_ROOT_CACHED;
        h->ops = ops;
        h->ops_arg = ops_arg;
        INIT_HLIST_NODE(&h->mn.hlist);
        spin_lock_init(&h->lock);
        h->mn.ops = &mn_opts;
        INIT_WORK(&h->del_work, handle_remove);
        INIT_LIST_HEAD(&h->del_list);
        INIT_LIST_HEAD(&h->lru_list);
        h->wq = wq;

        ret = mmu_notifier_register(&h->mn, current->mm);
        if (ret) {
                kfree(h);
                return ret;
        }

        *handler = h;
        return 0;
}
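/*
 * Tear down a handler: stop notifications, drain the delete workqueue,
 * then release every remaining node.  The remove callback runs
 * synchronously here, which is safe because handler->lock is not held.
 */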
void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
{
        struct mmu_rb_node *rbnode;
        struct rb_node *node;
        unsigned long flags;
        struct list_head del_list;

        /* Prevent freeing of mm until we are completely finished. */
        mmgrab(handler->mn.mm);

        /* Unregister first so we don't get any more notifications. */
        mmu_notifier_unregister(&handler->mn, handler->mn.mm);

        /*
         * Make sure the wq delete handler is finished running. It will not
         * be triggered once the mmu notifiers are unregistered above.
         */
        flush_work(&handler->del_work);

        INIT_LIST_HEAD(&del_list);

        spin_lock_irqsave(&handler->lock, flags);
        while ((node = rb_first_cached(&handler->root))) {
                rbnode = rb_entry(node, struct mmu_rb_node, node);
                rb_erase_cached(node, &handler->root);
                /* move from LRU list to delete list */
                list_move(&rbnode->list, &del_list);
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        while (!list_empty(&del_list)) {
                rbnode = list_first_entry(&del_list, struct mmu_rb_node, list);
                list_del(&rbnode->list);
                kref_put(&rbnode->refcount, release_immediate);
        }

        /* Now the mm may be freed. */
        mmdrop(handler->mn.mm);

        kfree(handler);
}
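/*
 * Insert a node unless an existing node already overlaps the range
 * (subject to ops->filter); on success the node joins the tail of the
 * LRU list and takes a back-pointer to the handler.  Returns -EPERM
 * when called from a different mm than the one registered, -EEXIST on
 * overlap, or 0 on success.
 */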
int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
                       struct mmu_rb_node *mnode)
{
        struct mmu_rb_node *node;
        unsigned long flags;
        int ret = 0;

        trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);

        if (current->mm != handler->mn.mm)
                return -EPERM;

        spin_lock_irqsave(&handler->lock, flags);
        node = __mmu_rb_search(handler, mnode->addr, mnode->len);
        if (node) {
                ret = -EEXIST;
                goto unlock;
        }
        __mmu_int_rb_insert(mnode, &handler->root);
        list_add_tail(&mnode->list, &handler->lru_list);
        mnode->handler = handler;
unlock:
        spin_unlock_irqrestore(&handler->lock, flags);
        return ret;
}
/* Caller must hold handler lock */
struct mmu_rb_node *hfi1_mmu_rb_get_first(struct mmu_rb_handler *handler,
                                          unsigned long addr,
                                          unsigned long len)
{
        struct mmu_rb_node *node;

        trace_hfi1_mmu_rb_search(addr, len);
        node = __mmu_int_rb_iter_first(&handler->root, addr, (addr + len) - 1);
        if (node)
                list_move_tail(&node->list, &handler->lru_list);
        return node;
}
/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
                                           unsigned long addr,
                                           unsigned long len)
{
        struct mmu_rb_node *node = NULL;

        trace_hfi1_mmu_rb_search(addr, len);
        if (!handler->ops->filter) {
                node = __mmu_int_rb_iter_first(&handler->root, addr,
                                               (addr + len) - 1);
        } else {
                for (node = __mmu_int_rb_iter_first(&handler->root, addr,
                                                    (addr + len) - 1);
                     node;
                     node = __mmu_int_rb_iter_next(node, addr,
                                                   (addr + len) - 1)) {
                        if (handler->ops->filter(node, addr, len))
                                return node;
                }
        }
        return node;
}
/*
 * Must NOT call while holding mnode->handler->lock.
 * mnode->handler->ops->remove() may sleep and mnode->handler->lock is a
 * spinlock.
 */
static void release_immediate(struct kref *refcount)
{
        struct mmu_rb_node *mnode =
                container_of(refcount, struct mmu_rb_node, refcount);
        mnode->handler->ops->remove(mnode->handler->ops_arg, mnode);
}
/* Caller must hold mnode->handler->lock */
static void release_nolock(struct kref *refcount)
{
        struct mmu_rb_node *mnode =
                container_of(refcount, struct mmu_rb_node, refcount);
        list_move(&mnode->list, &mnode->handler->del_list);
        queue_work(mnode->handler->wq, &mnode->handler->del_work);
}
/*
 * struct mmu_rb_node->refcount kref_put() callback.
 * Adds mmu_rb_node to mmu_rb_node->handler->del_list and queues
 * handler->del_work on handler->wq.
 * Does not remove mmu_rb_node from handler->lru_list or handler->rb_root.
 * Acquires mmu_rb_node->handler->lock; do not call while already holding
 * mmu_rb_node->handler->lock.
 */
void hfi1_mmu_rb_release(struct kref *refcount)
{
        struct mmu_rb_node *mnode =
                container_of(refcount, struct mmu_rb_node, refcount);
        struct mmu_rb_handler *handler = mnode->handler;
        unsigned long flags;

        spin_lock_irqsave(&handler->lock, flags);
        list_move(&mnode->list, &mnode->handler->del_list);
        spin_unlock_irqrestore(&handler->lock, flags);
        queue_work(handler->wq, &handler->del_work);
}
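/*
 * Walk the LRU list from oldest to newest, offering idle nodes (those
 * to which the handler holds the only reference) to the ops->evict
 * callback until it asks to stop.  Evicted nodes are released outside
 * the lock so the remove callback may sleep.
 */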
void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
{
        struct mmu_rb_node *rbnode, *ptr;
        struct list_head del_list;
        unsigned long flags;
        bool stop = false;

        if (current->mm != handler->mn.mm)
                return;

        INIT_LIST_HEAD(&del_list);

        spin_lock_irqsave(&handler->lock, flags);
        list_for_each_entry_safe(rbnode, ptr, &handler->lru_list, list) {
                /* refcount == 1 implies mmu_rb_handler has only rbnode ref */
                if (kref_read(&rbnode->refcount) > 1)
                        continue;

                if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
                                        &stop)) {
                        __mmu_int_rb_remove(rbnode, &handler->root);
                        /* move from LRU list to delete list */
                        list_move(&rbnode->list, &del_list);
                }
                if (stop)
                        break;
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        list_for_each_entry_safe(rbnode, ptr, &del_list, list) {
                kref_put(&rbnode->refcount, release_immediate);
        }
}
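/*
 * MMU notifier callback: the kernel is invalidating mappings in
 * [range->start, range->end).  Drop every overlapping node from the rb
 * tree and LRU list.  handler->lock is held here, so when kref_put()
 * drops the last reference, release_nolock() defers the sleepable
 * remove callback to the workqueue.
 */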
static int mmu_notifier_range_start(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *range)
{
        struct mmu_rb_handler *handler =
                container_of(mn, struct mmu_rb_handler, mn);
        struct rb_root_cached *root = &handler->root;
        struct mmu_rb_node *node, *ptr = NULL;
        unsigned long flags;

        spin_lock_irqsave(&handler->lock, flags);
        for (node = __mmu_int_rb_iter_first(root, range->start,
                                            range->end - 1);
             node; node = ptr) {
                /* Guard against node removal. */
                ptr = __mmu_int_rb_iter_next(node, range->start,
                                             range->end - 1);
                trace_hfi1_mmu_mem_invalidate(node->addr, node->len);
                /* Remove from rb tree and lru_list. */
                __mmu_int_rb_remove(node, root);
                list_del_init(&node->list);
                kref_put(&node->refcount, release_nolock);
        }
        spin_unlock_irqrestore(&handler->lock, flags);

        return 0;
}
/*
 * Work queue function to remove all nodes that have been queued up to
 * be removed. The key feature is that mm->mmap_lock is not being held
 * and the remove callback can sleep while taking it, if needed.
 */
static void handle_remove(struct work_struct *work)
{
        struct mmu_rb_handler *handler = container_of(work,
                                                      struct mmu_rb_handler,
                                                      del_work);
        struct list_head del_list;
        unsigned long flags;
        struct mmu_rb_node *node;

        /* remove anything that is queued to get removed */
        spin_lock_irqsave(&handler->lock, flags);
        list_replace_init(&handler->del_list, &del_list);
        spin_unlock_irqrestore(&handler->lock, flags);

        while (!list_empty(&del_list)) {
                node = list_first_entry(&del_list, struct mmu_rb_node, list);
                list_del(&node->list);
                handler->ops->remove(handler->ops_arg, node);
        }
}