/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/module.h>

#include "hfi1.h"
55 static unsigned long cache_size = 256;
56 module_param(cache_size, ulong, S_IRUGO | S_IWUSR);
57 MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
60 * Determine whether the caller can pin pages.
62 * This function should be used in the implementation of buffer caches.
63 * The cache implementation should call this function prior to attempting
64 * to pin buffer pages in order to determine whether they should do so.
65 * The function computes cache limits based on the configured ulimit and
66 * cache size. Use of this function is especially important for caches
67 * which are not limited in any other way (e.g. by HW resources) and, thus,
68 * could keeping caching buffers.
71 bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
72 u32 nlocked, u32 npages)
74 unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
75 size = (cache_size * (1UL << 20)); /* convert to bytes */
76 unsigned usr_ctxts = dd->num_rcv_contexts - dd->first_user_ctxt;
77 bool can_lock = capable(CAP_IPC_LOCK);
80 * Calculate per-cache size. The calculation below uses only a quarter
81 * of the available per-context limit. This leaves space for other
82 * pinning. Should we worry about shared ctxts?
84 cache_limit = (ulimit / usr_ctxts) / 4;
86 /* If ulimit isn't set to "unlimited" and is smaller than cache_size. */
87 if (ulimit != (-1UL) && size > cache_limit)
90 /* Convert to number of pages */
91 size = DIV_ROUND_UP(size, PAGE_SIZE);
93 down_read(&mm->mmap_sem);
94 pinned = mm->pinned_vm;
95 up_read(&mm->mmap_sem);
97 /* First, check the absolute limit against all pinned pages. */
98 if (pinned + npages >= ulimit && !can_lock)
101 return ((nlocked + npages) <= size) || can_lock;
104 int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
105 bool writable, struct page **pages)
109 ret = get_user_pages_fast(vaddr, npages, writable, pages);
113 down_write(&mm->mmap_sem);
114 mm->pinned_vm += ret;
115 up_write(&mm->mmap_sem);
120 void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
121 size_t npages, bool dirty)
125 for (i = 0; i < npages; i++) {
127 set_page_dirty_lock(p[i]);
131 if (mm) { /* during close after signal, mm can be NULL */
132 down_write(&mm->mmap_sem);
133 mm->pinned_vm -= npages;
134 up_write(&mm->mmap_sem);