/* drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
 * (from GNU Linux-libre 4.4.289-gnu1)
 */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

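/* opcode modifiers for MLX5_CMD_OP_MANAGE_PAGES: report that no pages
 * can be supplied, give pages to the firmware, or take pages back.
 */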
enum {
        MLX5_PAGES_CANT_GIVE    = 0,
        MLX5_PAGES_GIVE         = 1,
        MLX5_PAGES_TAKE         = 2
};

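/* opcode modifiers for MLX5_CMD_OP_QUERY_PAGES: which stage of the
 * device's page requirements is being queried.
 */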
enum {
        MLX5_BOOT_PAGES         = 1,
        MLX5_INIT_PAGES         = 2,
        MLX5_POST_INIT_PAGES    = 3
};

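/* work item queued when the firmware requests a page count change;
 * npages > 0 means the firmware wants pages, npages < 0 means it is
 * releasing them.
 */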
struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u16     func_id;
        s32     npages;
        struct work_struct work;
};

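/* tracks one host page handed to the firmware.  The page is carved
 * into MLX5_NUM_4K_IN_PAGE 4K chunks; 'bitmask' marks which chunks are
 * still free, and 'list' links the page on the free list while any
 * chunk remains available.  E.g. with 64K host pages there are 16
 * chunks and chunk n lives at addr + n * MLX5_ADAPTER_PAGE_SIZE.
 */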
struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
        u16                     func_id;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned                free_count;
};

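/* command mailbox layouts for QUERY_PAGES and MANAGE_PAGES; 'pas' is a
 * variable-length array of big-endian 64-bit page addresses.
 */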
struct mlx5_query_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
        u8                      rsvd[8];
};

struct mlx5_query_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
        __be16                  rsvd;
        __be16                  func_id;
        __be32                  num_pages;
};

struct mlx5_manage_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
        __be16                  rsvd;
        __be16                  func_id;
        __be32                  num_entries;
        __be64                  pas[0];
};

struct mlx5_manage_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
        __be32                  num_entries;
        u8                      rsvd[4];
        __be64                  pas[0];
};

enum {
        MAX_RECLAIM_TIME_MSECS  = 5000,
};

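/* the firmware manages pages in 4K units regardless of the host
 * PAGE_SIZE, so one host page may hold several firmware pages.
 * (MLX5_MAX_RECLAIM_TIME_MILI is not referenced in this file.)
 */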
enum {
        MLX5_MAX_RECLAIM_TIME_MILI      = 5000,
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

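/* add a newly mapped host page to the address-keyed rbtree and to the
 * free list, with all of its 4K chunks marked free.
 */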
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;
        struct fw_page *nfp;
        struct fw_page *tfp;
        int i;

        while (*new) {
                parent = *new;
                tfp = rb_entry(parent, struct fw_page, rb_node);
                if (tfp->addr < addr)
                        new = &parent->rb_left;
                else if (tfp->addr > addr)
                        new = &parent->rb_right;
                else
                        return -EEXIST;
        }

        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
        if (!nfp)
                return -ENOMEM;

        nfp->addr = addr;
        nfp->page = page;
        nfp->func_id = func_id;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);

        rb_link_node(&nfp->rb_node, parent, new);
        rb_insert_color(&nfp->rb_node, root);
        list_add(&nfp->list, &dev->priv.free_list);

        return 0;
}

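/* look up the fw_page tracking structure for a page-aligned DMA
 * address (same reversed key ordering as insert_page).
 */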
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node *tmp = root->rb_node;
        struct fw_page *result = NULL;
        struct fw_page *tfp;

        while (tmp) {
                tfp = rb_entry(tmp, struct fw_page, rb_node);
                if (tfp->addr < addr) {
                        tmp = tmp->rb_left;
                } else if (tfp->addr > addr) {
                        tmp = tmp->rb_right;
                } else {
                        result = tfp;
                        break;
                }
        }

        return result;
}

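/* ask the firmware how many pages it needs (positive) or wants to
 * release (negative) for the boot or init stage.
 */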
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
{
        struct mlx5_query_pages_inbox   in;
        struct mlx5_query_pages_outbox  out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
        in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);

        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        *npages = be32_to_cpu(out.num_pages);
        *func_id = be16_to_cpu(out.func_id);

        return err;
}

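/* carve a free 4K chunk out of the first page on the free list and
 * return its DMA address.  Fails with -ENOMEM when no tracked page
 * has a free chunk, in which case the caller allocates a fresh host
 * page via alloc_system_page().
 */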
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
        struct fw_page *fp;
        unsigned n;

        if (list_empty(&dev->priv.free_list))
                return -ENOMEM;

        fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
        if (n >= MLX5_NUM_4K_IN_PAGE) {
                mlx5_core_warn(dev, "alloc 4k bug\n");
                return -ENOENT;
        }
        clear_bit(n, &fp->bitmask);
        fp->free_count--;
        if (!fp->free_count)
                list_del(&fp->list);

        *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

        return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

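/* return a 4K chunk to its owning page.  When the whole page becomes
 * free it is untracked, unmapped and freed; when the first chunk comes
 * back the page rejoins the free list.  With 4K host pages those two
 * cases coincide, hence the free_count != 1 check before list_del.
 */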
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
        struct fw_page *fwp;
        int n;

        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
        if (!fwp) {
                mlx5_core_warn(dev, "page not found\n");
                return;
        }

        n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
        fwp->free_count++;
        set_bit(n, &fwp->bitmask);
        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
                rb_erase(&fwp->rb_node, &dev->priv.page_root);
                if (fwp->free_count != 1)
                        list_del(&fwp->list);
                dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                __free_page(fwp->page);
                kfree(fwp);
        } else if (fwp->free_count == 1) {
                list_add(&fwp->list, &dev->priv.free_list);
        }
}

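/* allocate and DMA-map one host page near the device's NUMA node and
 * start tracking it.  The firmware cannot use bus address 0, so a
 * mapping at address 0 is held aside while a second mapping is made,
 * then unmapped on the way out.
 */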
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
        struct page *page;
        u64 zero_addr = 1;
        u64 addr;
        int err;
        int nid = dev_to_node(&dev->pdev->dev);

        page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
        }
map:
        addr = dma_map_page(&dev->pdev->dev, page, 0,
                            PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&dev->pdev->dev, addr)) {
                mlx5_core_warn(dev, "failed dma mapping page\n");
                err = -ENOMEM;
                goto err_mapping;
        }

        /* Firmware doesn't support page with physical address 0 */
        if (addr == 0) {
                zero_addr = addr;
                goto map;
        }

        err = insert_page(dev, addr, page, func_id);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
        }

err_mapping:
        if (err)
                __free_page(page);

        if (zero_addr == 0)
                dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
                               DMA_BIDIRECTIONAL);

        return err;
}

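/* tell the firmware that its request for pages cannot be satisfied */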
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
        struct mlx5_manage_pages_inbox *in;
        struct mlx5_manage_pages_outbox out;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
        in->func_id = cpu_to_be16(func_id);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (!err)
                err = mlx5_cmd_status_to_err(&out.hdr);

        if (err)
                mlx5_core_warn(dev, "page notify failed\n");

        kfree(in);
}

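/* hand npages 4K chunks to the firmware in a single MANAGE_PAGES
 * command, drawing from the free list and allocating new host pages
 * on demand.  On failure every chunk already taken is returned and,
 * if requested, the firmware is notified that no pages can be given.
 */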
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int notify_fail)
{
        struct mlx5_manage_pages_inbox *in;
        struct mlx5_manage_pages_outbox out;
        int inlen;
        u64 addr;
        int err;
        int i;

        inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                goto out_free;
        }
        memset(&out, 0, sizeof(out));

        for (i = 0; i < npages; i++) {
retry:
                err = alloc_4k(dev, &addr);
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, func_id);
                        if (err)
                                goto out_4k;

                        goto retry;
                }
                in->pas[i] = cpu_to_be64(addr);
        }

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
        in->func_id = cpu_to_be16(func_id);
        in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_4k;
        }
        dev->priv.fw_pages += npages;

        err = mlx5_cmd_status_to_err(&out.hdr);
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
                               func_id, npages, out.hdr.status);
                goto out_4k;
        }

        mlx5_core_dbg(dev, "err %d\n", err);

        kvfree(in);
        return 0;

out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
        kvfree(in);
        if (notify_fail)
                page_notify_fail(dev, func_id);
        return err;
}

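/* ask the firmware to return up to npages 4K chunks for func_id and
 * free each address it actually hands back; the number reclaimed is
 * reported through nclaimed when the caller asks for it.
 */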
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed)
{
        struct mlx5_manage_pages_inbox   in;
        struct mlx5_manage_pages_outbox *out;
        int num_claimed;
        int outlen;
        u64 addr;
        int err;
        int i;

        if (nclaimed)
                *nclaimed = 0;

        memset(&in, 0, sizeof(in));
        outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
        in.func_id = cpu_to_be16(func_id);
        in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages\n");
                goto out_free;
        }
        dev->priv.fw_pages -= npages;

        if (out->hdr.status) {
                err = mlx5_cmd_status_to_err(&out->hdr);
                goto out_free;
        }

        num_claimed = be32_to_cpu(out->num_entries);
        if (nclaimed)
                *nclaimed = num_claimed;

        for (i = 0; i < num_claimed; i++) {
                addr = be64_to_cpu(out->pas[i]);
                free_4k(dev, addr);
        }

out_free:
        kvfree(out);
        return err;
}

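/* workqueue handler: the sign of npages selects between giving pages
 * to the firmware and reclaiming them from it.
 */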
static void pages_work_handler(struct work_struct *work)
{
        struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;

        if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1);

        if (err)
                mlx5_core_warn(dev, "%s fail %d\n",
                               req->npages < 0 ? "reclaim" : "give", err);

        kfree(req);
}

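/* queue a firmware page request for deferred handling; the request is
 * allocated with GFP_ATOMIC since this runs from the event path, where
 * commands cannot be executed directly.
 */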
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages)
{
        struct mlx5_pages_req *req;

        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
                return;
        }

        req->dev = dev;
        req->func_id = func_id;
        req->npages = npages;
        INIT_WORK(&req->work, pages_work_handler);
        queue_work(dev->priv.pg_wq, &req->work);
}

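/* query the device's startup page requirements and hand over that many
 * pages, without notifying the firmware on failure.
 */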
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
        u16 uninitialized_var(func_id);
        s32 uninitialized_var(npages);
        int err;

        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;

        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                      npages, boot ? "boot" : "init", func_id);

        return give_pages(dev, func_id, npages, 0);
}

enum {
        MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

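/* largest number of page addresses that fits in a reclaim command
 * whose output mailbox is built from the inline command layout plus
 * MLX5_BLKS_FOR_RECLAIM_PAGES protection blocks.
 */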
static int optimal_reclaimed_pages(void)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_layout *lay;
        int ret;

        ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
               sizeof(struct mlx5_manage_pages_outbox)) /
               FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

        return ret;
}

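/* give back every page the firmware still holds, retrying until the
 * tree is empty or no progress is made for MAX_RECLAIM_TIME_MSECS.
 * When the device is in internal error the firmware cannot respond,
 * so the pages are simply freed on the host side.
 */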
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        struct fw_page *fwp;
        struct rb_node *p;
        int nclaimed = 0;
        int err = 0;

        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
                        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                                free_4k(dev, fwp->addr);
                                nclaimed = 1;
                        } else {
                                err = reclaim_pages(dev, fwp->func_id,
                                                    optimal_reclaimed_pages(),
                                                    &nclaimed);
                        }
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
                                return err;
                        }
                        if (nclaimed)
                                end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
                }
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
        } while (p);

        return 0;
}

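/* pagealloc life cycle: init/cleanup manage the tracking structures,
 * start/stop manage the workqueue that serves firmware page requests.
 */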
void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
        dev->priv.page_root = RB_ROOT;
        INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
        /* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

        return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
        destroy_workqueue(dev->priv.pg_wq);
}