GNU Linux-libre 4.19.245-gnu1
[releases.git] / drivers / infiniband / hw / hns / hns_roce_db.c
1 /* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
2 /*
3  * Copyright (c) 2017 Hisilicon Limited.
4  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
5  */
6
7 #include <linux/platform_device.h>
8 #include <rdma/ib_umem.h>
9 #include "hns_roce_device.h"
10
11 int hns_roce_db_map_user(struct hns_roce_ucontext *context, unsigned long virt,
12                          struct hns_roce_db *db)
13 {
14         struct hns_roce_user_db_page *page;
15         int ret = 0;
16
17         mutex_lock(&context->page_mutex);
18
19         list_for_each_entry(page, &context->page_list, list)
20                 if (page->user_virt == (virt & PAGE_MASK))
21                         goto found;
22
23         page = kmalloc(sizeof(*page), GFP_KERNEL);
24         if (!page) {
25                 ret = -ENOMEM;
26                 goto out;
27         }
28
29         refcount_set(&page->refcount, 1);
30         page->user_virt = (virt & PAGE_MASK);
31         page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
32                                  PAGE_SIZE, 0, 0);
33         if (IS_ERR(page->umem)) {
34                 ret = PTR_ERR(page->umem);
35                 kfree(page);
36                 goto out;
37         }
38
39         list_add(&page->list, &context->page_list);
40
41 found:
42         db->dma = sg_dma_address(page->umem->sg_head.sgl) +
43                   (virt & ~PAGE_MASK);
44         page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
45         db->virt_addr = sg_virt(page->umem->sg_head.sgl);
46         db->u.user_page = page;
47         refcount_inc(&page->refcount);
48
49 out:
50         mutex_unlock(&context->page_mutex);
51
52         return ret;
53 }
54 EXPORT_SYMBOL(hns_roce_db_map_user);
55
56 void hns_roce_db_unmap_user(struct hns_roce_ucontext *context,
57                             struct hns_roce_db *db)
58 {
59         mutex_lock(&context->page_mutex);
60
61         refcount_dec(&db->u.user_page->refcount);
62         if (refcount_dec_if_one(&db->u.user_page->refcount)) {
63                 list_del(&db->u.user_page->list);
64                 ib_umem_release(db->u.user_page->umem);
65                 kfree(db->u.user_page);
66         }
67
68         mutex_unlock(&context->page_mutex);
69 }
70 EXPORT_SYMBOL(hns_roce_db_unmap_user);
71
72 static struct hns_roce_db_pgdir *hns_roce_alloc_db_pgdir(
73                                         struct device *dma_device)
74 {
75         struct hns_roce_db_pgdir *pgdir;
76
77         pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
78         if (!pgdir)
79                 return NULL;
80
81         bitmap_fill(pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2);
82         pgdir->bits[0] = pgdir->order0;
83         pgdir->bits[1] = pgdir->order1;
84         pgdir->page = dma_alloc_coherent(dma_device, PAGE_SIZE,
85                                          &pgdir->db_dma, GFP_KERNEL);
86         if (!pgdir->page) {
87                 kfree(pgdir);
88                 return NULL;
89         }
90
91         return pgdir;
92 }
93
/*
 * Carve a doorbell record of the given @order out of @pgdir using a
 * two-level buddy scheme: bits[0] tracks free single records (order 0),
 * bits[1] tracks free aligned pairs (order 1). A set bit means "free".
 *
 * Returns 0 and fills in @db on success, -ENOMEM if @pgdir has no free
 * slot of sufficient order.
 */
static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir *pgdir,
					struct hns_roce_db *db, int order)
{
	int o;
	int i;

	/* Try the requested order first, then fall back to splitting a
	 * larger (order-1) slot.
	 */
	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], HNS_ROCE_DB_PER_PAGE >> o);
		if (i < HNS_ROCE_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	/* Claim the slot at the order where it was found. */
	clear_bit(i, pgdir->bits[o]);

	/* Convert the order-o slot index into an order-0 record index. */
	i <<= o;

	/* If we split a larger slot, mark its buddy half free at the
	 * requested order.
	 */
	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	/* Records are 4 bytes apart; db_record indexes the record array
	 * while dma scales the byte offset explicitly.
	 */
	db->u.pgdir	= pgdir;
	db->index	= i;
	db->db_record	= pgdir->page + db->index;
	db->dma		= pgdir->db_dma  + db->index * 4;
	db->order	= order;

	return 0;
}
124
125 int hns_roce_alloc_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db,
126                       int order)
127 {
128         struct hns_roce_db_pgdir *pgdir;
129         int ret = 0;
130
131         mutex_lock(&hr_dev->pgdir_mutex);
132
133         list_for_each_entry(pgdir, &hr_dev->pgdir_list, list)
134                 if (!hns_roce_alloc_db_from_pgdir(pgdir, db, order))
135                         goto out;
136
137         pgdir = hns_roce_alloc_db_pgdir(hr_dev->dev);
138         if (!pgdir) {
139                 ret = -ENOMEM;
140                 goto out;
141         }
142
143         list_add(&pgdir->list, &hr_dev->pgdir_list);
144
145         /* This should never fail -- we just allocated an empty page: */
146         WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir, db, order));
147
148 out:
149         mutex_unlock(&hr_dev->pgdir_mutex);
150
151         return ret;
152 }
153 EXPORT_SYMBOL_GPL(hns_roce_alloc_db);
154
/*
 * Return a kernel doorbell record to its page's buddy bitmaps, merging
 * with its buddy when possible, and free the whole page once it is
 * entirely unused. Serialized by hr_dev->pgdir_mutex.
 */
void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db)
{
	int o;
	int i;

	mutex_lock(&hr_dev->pgdir_mutex);

	o = db->order;
	i = db->index;

	/* If this is an order-0 record and its buddy is also free,
	 * coalesce the pair back into one order-1 slot.
	 */
	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	/* Mark the (possibly merged) slot free at its final order. */
	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	/* When every order-1 pair is free again the page is unused:
	 * release the DMA page and its bookkeeping.
	 */
	if (bitmap_full(db->u.pgdir->order1, HNS_ROCE_DB_PER_PAGE / 2)) {
		dma_free_coherent(hr_dev->dev, PAGE_SIZE, db->u.pgdir->page,
				  db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&hr_dev->pgdir_mutex);
}
EXPORT_SYMBOL_GPL(hns_roce_free_db);