// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

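/*
 * Slot-map state machine, as implemented below: ->c is the number of
 * free slots while a map is installed, -1 when no map is installed (or
 * a teardown has completed), and less than -1 while a killed map still
 * has slots outstanding.  ->count (total slots) and ->map (the in-use
 * bitmap) are only meaningful while a map is installed.
 */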
struct slot_map {
        int c;
        wait_queue_head_t q;
        int count;
        unsigned long *map;
};

static struct slot_map rw_map = {
        .c = -1,
        .q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
        .c = -1,
        .q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};


static void install(struct slot_map *m, int count, unsigned long *map)
{
        spin_lock(&m->q.lock);
        m->c = m->count = count;
        m->map = map;
        wake_up_all_locked(&m->q);
        spin_unlock(&m->q.lock);
}

static void mark_killed(struct slot_map *m)
{
        spin_lock(&m->q.lock);
        m->c -= m->count + 1;
        spin_unlock(&m->q.lock);
}
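
/*
 * mark_killed() shifts ->c so that once every outstanding slot has
 * been returned via put(), ->c lands exactly on -1 ("finished dying").
 * A worked example with hypothetical numbers: with count == 4 and one
 * slot in use, ->c is 3 before mark_killed(), -2 after it, and the one
 * remaining put() brings it to -1, waking run_down().
 */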

static void run_down(struct slot_map *m)
{
        DEFINE_WAIT(wait);
        spin_lock(&m->q.lock);
        if (m->c != -1) {
                for (;;) {
                        if (likely(list_empty(&wait.entry)))
                                __add_wait_queue_entry_tail(&m->q, &wait);
                        set_current_state(TASK_UNINTERRUPTIBLE);

                        if (m->c == -1)
                                break;

                        spin_unlock(&m->q.lock);
                        schedule();
                        spin_lock(&m->q.lock);
                }
                __remove_wait_queue(&m->q, &wait);
                __set_current_state(TASK_RUNNING);
        }
        m->map = NULL;
        spin_unlock(&m->q.lock);
}

static void put(struct slot_map *m, int slot)
{
        int v;
        spin_lock(&m->q.lock);
        __clear_bit(slot, m->map);
        v = ++m->c;
        if (v > 0)
                wake_up_locked(&m->q);
        if (unlikely(v == -1))     /* finished dying */
                wake_up_all_locked(&m->q);
        spin_unlock(&m->q.lock);
}
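
/*
 * Note the two wakeups in put(): a positive ->c means a slot just
 * became free, so one exclusive waiter in wait_for_free() is woken;
 * ->c reaching -1 means the last outstanding slot of a killed map came
 * back, so every sleeper (in particular run_down()) is woken.
 */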

static int wait_for_free(struct slot_map *m)
{
        long left = slot_timeout_secs * HZ;
        DEFINE_WAIT(wait);

        do {
                long n = left, t;
                if (likely(list_empty(&wait.entry)))
                        __add_wait_queue_entry_tail_exclusive(&m->q, &wait);
                set_current_state(TASK_INTERRUPTIBLE);

                if (m->c > 0)
                        break;

                if (m->c < 0) {
                        /* we are waiting for a map to be installed */
                        /* it had better be there soon, or we give up */
                        if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
                                n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
                }
                spin_unlock(&m->q.lock);
                t = schedule_timeout(n);
                spin_lock(&m->q.lock);
                if (unlikely(!t) && n != left && m->c < 0)
                        left = t;
                else
                        left = t + (left - n);
                if (unlikely(signal_pending(current)))
                        left = -EINTR;
        } while (left > 0);

        if (!list_empty(&wait.entry))
                list_del(&wait.entry);
        else if (left <= 0 && waitqueue_active(&m->q))
                __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
        __set_current_state(TASK_RUNNING);

        if (likely(left > 0))
                return 0;

        return left < 0 ? -EINTR : -ETIMEDOUT;
}
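
/*
 * Timeout bookkeeping in wait_for_free(), spelled out: "left" is the
 * remaining budget of slot_timeout_secs * HZ jiffies.  While no map is
 * installed (->c < 0), each sleep is capped at
 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ; if that shorter cap fully
 * expires (t == 0 with n != left) and there is still no map, "left"
 * becomes 0 and the wait fails with -ETIMEDOUT.  Otherwise the elapsed
 * n - t jiffies are deducted from the budget (left = t + (left - n)).
 * On failure, if our wait entry was already removed by a wakeup
 * (autoremove_wake_function), that wakeup is relayed to the next
 * exclusive waiter so a freed slot is not lost.
 */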

static int get(struct slot_map *m)
{
        int res = 0;
        spin_lock(&m->q.lock);
        if (unlikely(m->c <= 0))
                res = wait_for_free(m);
        if (likely(!res)) {
                m->c--;
                res = find_first_zero_bit(m->map, m->count);
                __set_bit(res, m->map);
        }
        spin_unlock(&m->q.lock);
        return res;
}
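
/*
 * get() therefore returns either a slot index (>= 0), with the
 * corresponding bit set in ->map, or -EINTR/-ETIMEDOUT propagated from
 * wait_for_free().  The bitmap search and the claim both happen under
 * the waitqueue lock, so two callers can never be handed the same slot.
 */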

/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
        void __user *uaddr;             /* user space address pointer */
        struct page **page_array;       /* array of mapped pages */
        int array_count;                /* size of above arrays */
        struct list_head list_link;
};

static struct orangefs_bufmap {
        int desc_size;
        int desc_shift;
        int desc_count;
        int total_size;
        int page_count;

        struct page **page_array;
        struct orangefs_bufmap_desc *desc_array;

        /* array to track usage of buffer descriptors */
        unsigned long *buffer_index_array;

        /* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
        unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;
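
/*
 * Sizing invariants (validated in orangefs_bufmap_initialize() below),
 * with a worked example using hypothetical numbers: userspace supplies
 * total_size == size * count with size a multiple of PAGE_SIZE.  With
 * size == 4 MiB and count == 5 on 4 KiB pages, total_size is 20 MiB,
 * page_count is 5120, and each of the 5 descriptors spans
 * desc_size / PAGE_SIZE == 1024 pages.
 */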

static DEFINE_SPINLOCK(orangefs_bufmap_lock);

static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
        int i;

        for (i = 0; i < bufmap->page_count; i++)
                put_page(bufmap->page_array[i]);
}

static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
        kfree(bufmap->page_array);
        kfree(bufmap->desc_array);
        bitmap_free(bufmap->buffer_index_array);
        kfree(bufmap);
}

/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
        struct orangefs_bufmap *bufmap;
        int size = 0;
        spin_lock(&orangefs_bufmap_lock);
        bufmap = __orangefs_bufmap;
        if (bufmap)
                size = bufmap->desc_size;
        spin_unlock(&orangefs_bufmap_lock);
        return size;
}

int orangefs_bufmap_shift_query(void)
{
        struct orangefs_bufmap *bufmap;
        int shift = 0;
        spin_lock(&orangefs_bufmap_lock);
        bufmap = __orangefs_bufmap;
        if (bufmap)
                shift = bufmap->desc_shift;
        spin_unlock(&orangefs_bufmap_lock);
        return shift;
}

static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);

static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
        struct orangefs_bufmap *bufmap;

        bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
        if (!bufmap)
                goto out;

        bufmap->total_size = user_desc->total_size;
        bufmap->desc_count = user_desc->count;
        bufmap->desc_size = user_desc->size;
        bufmap->desc_shift = ilog2(bufmap->desc_size);

        bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
        if (!bufmap->buffer_index_array)
                goto out_free_bufmap;

        bufmap->desc_array =
                kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
                        GFP_KERNEL);
        if (!bufmap->desc_array)
                goto out_free_index_array;

        bufmap->page_count = bufmap->total_size / PAGE_SIZE;

        /* allocate storage to track our page mappings */
        bufmap->page_array =
                kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
        if (!bufmap->page_array)
                goto out_free_desc_array;

        return bufmap;

out_free_desc_array:
        kfree(bufmap->desc_array);
out_free_index_array:
        bitmap_free(bufmap->buffer_index_array);
out_free_bufmap:
        kfree(bufmap);
out:
        return NULL;
}

static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
                struct ORANGEFS_dev_map_desc *user_desc)
{
        int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
        int offset = 0, ret, i;

        /* map the pages */
        ret = get_user_pages_fast((unsigned long)user_desc->ptr,
                             bufmap->page_count, 1, bufmap->page_array);

        if (ret < 0)
                return ret;

        if (ret != bufmap->page_count) {
                gossip_err("orangefs error: asked for %d pages, only got %d.\n",
                                bufmap->page_count, ret);

                for (i = 0; i < ret; i++) {
                        SetPageError(bufmap->page_array[i]);
                        put_page(bufmap->page_array[i]);
                }
                return -ENOMEM;
        }

        /*
         * Ideally we would get kernel-space pointers for each page, but
         * we can't kmap that many pages at once if highmem is in use.
         * Instead, we kmap/kunmap the page each time its kernel address
         * is needed.
         */
        for (i = 0; i < bufmap->page_count; i++)
                flush_dcache_page(bufmap->page_array[i]);

        /* build a list of available descriptors */
        for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
                bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
                bufmap->desc_array[i].array_count = pages_per_desc;
                bufmap->desc_array[i].uaddr =
                    (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
                offset += pages_per_desc;
        }

        return 0;
}
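
/*
 * Resulting descriptor layout, spelled out: descriptor i points at
 * &page_array[i * pages_per_desc] and covers the user range starting
 * at user_desc->ptr + i * pages_per_desc * PAGE_SIZE.  In the
 * hypothetical 4 MiB / 5-descriptor example above, descriptor 2 starts
 * at page 2048 and user offset 8 MiB.
 */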

/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
        struct orangefs_bufmap *bufmap;
        int ret = -EINVAL;

        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "orangefs_bufmap_initialize: called (ptr (%p) sz (%d) cnt(%d).\n",
                     user_desc->ptr,
                     user_desc->size,
                     user_desc->count);

        if (user_desc->total_size < 0 ||
            user_desc->size < 0 ||
            user_desc->count < 0)
                goto out;

        /*
         * sanity check alignment and size of buffer that caller wants to
         * work with
         */
        if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
            (unsigned long)user_desc->ptr) {
                gossip_err("orangefs error: memory alignment (front). %p\n",
                           user_desc->ptr);
                goto out;
        }

        if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
            != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
                gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
                           user_desc->ptr,
                           user_desc->total_size);
                goto out;
        }

        if (user_desc->total_size != (user_desc->size * user_desc->count)) {
                gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
                           user_desc->total_size,
                           user_desc->size,
                           user_desc->count);
                goto out;
        }

        if ((user_desc->size % PAGE_SIZE) != 0) {
                gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
                           user_desc->size);
                goto out;
        }

        ret = -ENOMEM;
        bufmap = orangefs_bufmap_alloc(user_desc);
        if (!bufmap)
                goto out;

        ret = orangefs_bufmap_map(bufmap, user_desc);
        if (ret)
                goto out_free_bufmap;

        spin_lock(&orangefs_bufmap_lock);
        if (__orangefs_bufmap) {
                spin_unlock(&orangefs_bufmap_lock);
                gossip_err("orangefs: error: bufmap already initialized.\n");
                ret = -EINVAL;
                goto out_unmap_bufmap;
        }
        __orangefs_bufmap = bufmap;
        install(&rw_map,
                bufmap->desc_count,
                bufmap->buffer_index_array);
        install(&readdir_map,
                ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
                bufmap->readdir_index_array);
        spin_unlock(&orangefs_bufmap_lock);

        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "orangefs_bufmap_initialize: exiting normally\n");
        return 0;

out_unmap_bufmap:
        orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
        orangefs_bufmap_free(bufmap);
out:
        return ret;
}
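
/*
 * To summarize the validation above: user_desc->ptr must be
 * page-aligned, total_size must equal size * count, and size must be a
 * whole number of pages; anything else fails with -EINVAL before any
 * user pages are pinned.
 */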

/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
        struct orangefs_bufmap *bufmap = __orangefs_bufmap;
        if (!bufmap)
                return;
        gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
        mark_killed(&rw_map);
        mark_killed(&readdir_map);
        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "orangefs_bufmap_finalize: exiting normally\n");
}

void orangefs_bufmap_run_down(void)
{
        struct orangefs_bufmap *bufmap = __orangefs_bufmap;
        if (!bufmap)
                return;
        run_down(&rw_map);
        run_down(&readdir_map);
        spin_lock(&orangefs_bufmap_lock);
        __orangefs_bufmap = NULL;
        spin_unlock(&orangefs_bufmap_lock);
        orangefs_bufmap_unmap(bufmap);
        orangefs_bufmap_free(bufmap);
}
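
/*
 * Teardown is two-phase: orangefs_bufmap_finalize() only marks both
 * slot maps killed, after which get() callers find no free slots and
 * either wait for a fresh install() or time out, while
 * orangefs_bufmap_run_down() blocks until every outstanding slot has
 * been put() back before unpinning the pages and freeing the bufmap.
 */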

/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
        return get(&rw_map);
}
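
/*
 * A minimal usage sketch pairing orangefs_bufmap_get() with
 * orangefs_bufmap_put() below (hypothetical caller, not part of this
 * file):
 *
 *        int slot = orangefs_bufmap_get();
 *        if (slot < 0)
 *                return slot;        (wait_for_free() failed: -EINTR/-ETIMEDOUT)
 *        ... stage I/O through the shared buffer at index slot ...
 *        orangefs_bufmap_put(slot);
 */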

/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
        put(&rw_map, buffer_index);
}

/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space,
 * that could be done at a later point in time. Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
        return get(&readdir_map);
}

void orangefs_readdir_index_put(int buffer_index)
{
        put(&readdir_map, buffer_index);
}

/*
 * We've been handed an iovec; we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
                                int buffer_index,
                                size_t size)
{
        struct orangefs_bufmap_desc *to;
        int i;

        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "%s: buffer_index:%d: size:%zu:\n",
                     __func__, buffer_index, size);

        to = &__orangefs_bufmap->desc_array[buffer_index];
        for (i = 0; size; i++) {
                struct page *page = to->page_array[i];
                size_t n = size;
                if (n > PAGE_SIZE)
                        n = PAGE_SIZE;
                if (copy_page_from_iter(page, 0, n, iter) != n)
                        return -EFAULT;
                size -= n;
        }
        return 0;
}

/*
 * We've been handed an iovec; we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
                                    int buffer_index,
                                    size_t size)
{
        struct orangefs_bufmap_desc *from;
        int i;

        from = &__orangefs_bufmap->desc_array[buffer_index];
        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "%s: buffer_index:%d: size:%zu:\n",
                     __func__, buffer_index, size);

        for (i = 0; size; i++) {
                struct page *page = from->page_array[i];
                size_t n = size;
                if (n > PAGE_SIZE)
                        n = PAGE_SIZE;
                n = copy_page_to_iter(page, 0, n, iter);
                if (!n)
                        return -EFAULT;
                size -= n;
        }
        return 0;
}
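
/*
 * Note the asymmetry between the two copy loops above: the
 * copy-from-iovec path insists that each page-sized chunk be copied in
 * full and returns -EFAULT otherwise, while the copy-to-iovec path
 * tolerates partial progress per iteration and only treats zero
 * progress as a fault.  Both loops assume the caller passes a size no
 * larger than the descriptor, so that page_array[i] stays in bounds.
 */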