/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "pvrdma.h"

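/*
 * pvrdma_page_dir_init - build a two-level page directory
 *
 * A single directory page holds the DMA addresses of the table pages,
 * and each table page holds the DMA addresses of up to PAGE_SIZE /
 * sizeof(u64) data pages.  If @alloc_pages is true, the data pages
 * themselves are also allocated here and their DMA addresses recorded.
 * An @npages beyond PVRDMA_PAGE_DIR_MAX_PAGES yields -EINVAL; on any
 * allocation failure, everything allocated so far is torn down via
 * pvrdma_page_dir_cleanup() and -ENOMEM is returned.
 */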
int pvrdma_page_dir_init(struct pvrdma_dev *dev, struct pvrdma_page_dir *pdir,
			 u64 npages, bool alloc_pages)
{
	u64 i;

	if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)
		return -EINVAL;

	memset(pdir, 0, sizeof(*pdir));

	pdir->dir = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
				       &pdir->dir_dma, GFP_KERNEL);
	if (!pdir->dir)
		goto err;

	pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;
	pdir->tables = kcalloc(pdir->ntables, sizeof(*pdir->tables),
			       GFP_KERNEL);
	if (!pdir->tables)
		goto err;

	for (i = 0; i < pdir->ntables; i++) {
		pdir->tables[i] = dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						(dma_addr_t *)&pdir->dir[i],
						GFP_KERNEL);
		if (!pdir->tables[i])
			goto err;
	}

	pdir->npages = npages;

	if (alloc_pages) {
		pdir->pages = kcalloc(npages, sizeof(*pdir->pages),
				      GFP_KERNEL);
		if (!pdir->pages)
			goto err;

		for (i = 0; i < pdir->npages; i++) {
			dma_addr_t page_dma;

			pdir->pages[i] = dma_alloc_coherent(&dev->pdev->dev,
							    PAGE_SIZE,
							    &page_dma,
							    GFP_KERNEL);
			if (!pdir->pages[i])
				goto err;

			pvrdma_page_dir_insert_dma(pdir, i, page_dma);
		}
	}

	return 0;

err:
	pvrdma_page_dir_cleanup(dev, pdir);

	return -ENOMEM;
}

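/* Return the table page covering directory index @idx. */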
static u64 *pvrdma_page_dir_table(struct pvrdma_page_dir *pdir, u64 idx)
{
	return pdir->tables[PVRDMA_PAGE_DIR_TABLE(idx)];
}

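/* Look up the DMA address recorded at directory index @idx. */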
dma_addr_t pvrdma_page_dir_get_dma(struct pvrdma_page_dir *pdir, u64 idx)
{
	return pvrdma_page_dir_table(pdir, idx)[PVRDMA_PAGE_DIR_PAGE(idx)];
}

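/*
 * Free the data pages allocated by pvrdma_page_dir_init().  Pages are
 * allocated in order, so the walk can stop at the first empty slot.
 */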
static void pvrdma_page_dir_cleanup_pages(struct pvrdma_dev *dev,
					  struct pvrdma_page_dir *pdir)
{
	if (pdir->pages) {
		u64 i;

		for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {
			dma_addr_t page_dma = pvrdma_page_dir_get_dma(pdir, i);

			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  pdir->pages[i], page_dma);
		}

		kfree(pdir->pages);
	}
}

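/* Free the data pages first, then every allocated table page. */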
static void pvrdma_page_dir_cleanup_tables(struct pvrdma_dev *dev,
					   struct pvrdma_page_dir *pdir)
{
	if (pdir->tables) {
		int i;

		pvrdma_page_dir_cleanup_pages(dev, pdir);

		for (i = 0; i < pdir->ntables; i++) {
			u64 *table = pdir->tables[i];

			if (table)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  table, pdir->dir[i]);
		}

		kfree(pdir->tables);
	}
}

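/*
 * Tear down the whole directory: data pages, table pages, then the
 * directory page itself.  Safe to call on a partially initialized @pdir.
 */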
void pvrdma_page_dir_cleanup(struct pvrdma_dev *dev,
			     struct pvrdma_page_dir *pdir)
{
	if (pdir->dir) {
		pvrdma_page_dir_cleanup_tables(dev, pdir);
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  pdir->dir, pdir->dir_dma);
	}
}

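/* Record DMA address @daddr at directory index @idx. */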
int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
			       dma_addr_t daddr)
{
	u64 *table;

	if (idx >= pdir->npages)
		return -EINVAL;

	table = pvrdma_page_dir_table(pdir, idx);
	table[PVRDMA_PAGE_DIR_PAGE(idx)] = daddr;

	return 0;
}

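/*
 * Walk the DMA-mapped scatterlist of a user memory region and record
 * the DMA address of each page, starting at directory index @offset.
 */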
int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
				struct ib_umem *umem, u64 offset)
{
	u64 i = offset;
	int j, entry;
	int ret = 0, len = 0;
	struct scatterlist *sg;

	if (offset >= pdir->npages)
		return -EINVAL;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (j = 0; j < len; j++) {
			dma_addr_t addr = sg_dma_address(sg) +
					  (j << umem->page_shift);

			ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
			if (ret)
				goto exit;

			i++;
		}
	}

exit:
	return ret;
}

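/* Copy an array of DMA addresses into the directory, starting at index 0. */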
int pvrdma_page_dir_insert_page_list(struct pvrdma_page_dir *pdir,
				     u64 *page_list,
				     int num_pages)
{
	int i;
	int ret;

	if (num_pages > pdir->npages)
		return -EINVAL;

	for (i = 0; i < num_pages; i++) {
		ret = pvrdma_page_dir_insert_dma(pdir, i, page_list[i]);
		if (ret)
			return ret;
	}

	return 0;
}

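/*
 * The remaining helpers translate between the IB core and PVRDMA device
 * representations of the same attributes, field for field.
 */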
void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src)
{
	dst->max_send_wr = src->max_send_wr;
	dst->max_recv_wr = src->max_recv_wr;
	dst->max_send_sge = src->max_send_sge;
	dst->max_recv_sge = src->max_recv_sge;
	dst->max_inline_data = src->max_inline_data;
}

void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, const struct ib_qp_cap *src)
{
	dst->max_send_wr = src->max_send_wr;
	dst->max_recv_wr = src->max_recv_wr;
	dst->max_send_sge = src->max_send_sge;
	dst->max_recv_sge = src->max_recv_sge;
	dst->max_inline_data = src->max_inline_data;
}

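/*
 * GIDs have an identical layout in both representations, so a straight
 * memcpy is safe; the BUILD_BUG_ON enforces this at compile time.
 */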
void pvrdma_gid_to_ib(union ib_gid *dst, const union pvrdma_gid *src)
{
	BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
	memcpy(dst, src, sizeof(*src));
}

void ib_gid_to_pvrdma(union pvrdma_gid *dst, const union ib_gid *src)
{
	BUILD_BUG_ON(sizeof(union pvrdma_gid) != sizeof(union ib_gid));
	memcpy(dst, src, sizeof(*src));
}

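/* Translate the global routing (GRH) fields in either direction. */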
void pvrdma_global_route_to_ib(struct ib_global_route *dst,
			       const struct pvrdma_global_route *src)
{
	pvrdma_gid_to_ib(&dst->dgid, &src->dgid);
	dst->flow_label = src->flow_label;
	dst->sgid_index = src->sgid_index;
	dst->hop_limit = src->hop_limit;
	dst->traffic_class = src->traffic_class;
}

void ib_global_route_to_pvrdma(struct pvrdma_global_route *dst,
			       const struct ib_global_route *src)
{
	ib_gid_to_pvrdma(&dst->dgid, &src->dgid);
	dst->flow_label = src->flow_label;
	dst->sgid_index = src->sgid_index;
	dst->hop_limit = src->hop_limit;
	dst->traffic_class = src->traffic_class;
}

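/*
 * Translate address-handle attributes.  The device is RoCE-only, so the
 * IB-side type is always RDMA_AH_ATTR_TYPE_ROCE and the destination MAC
 * is carried alongside the routing fields.
 */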
void pvrdma_ah_attr_to_rdma(struct rdma_ah_attr *dst,
			    const struct pvrdma_ah_attr *src)
{
	dst->type = RDMA_AH_ATTR_TYPE_ROCE;
	pvrdma_global_route_to_ib(rdma_ah_retrieve_grh(dst), &src->grh);
	rdma_ah_set_dlid(dst, src->dlid);
	rdma_ah_set_sl(dst, src->sl);
	rdma_ah_set_path_bits(dst, src->src_path_bits);
	rdma_ah_set_static_rate(dst, src->static_rate);
	rdma_ah_set_ah_flags(dst, src->ah_flags);
	rdma_ah_set_port_num(dst, src->port_num);
	memcpy(dst->roce.dmac, &src->dmac, ETH_ALEN);
}

void rdma_ah_attr_to_pvrdma(struct pvrdma_ah_attr *dst,
			    const struct rdma_ah_attr *src)
{
	ib_global_route_to_pvrdma(&dst->grh, rdma_ah_read_grh(src));
	dst->dlid = rdma_ah_get_dlid(src);
	dst->sl = rdma_ah_get_sl(src);
	dst->src_path_bits = rdma_ah_get_path_bits(src);
	dst->static_rate = rdma_ah_get_static_rate(src);
	dst->ah_flags = rdma_ah_get_ah_flags(src);
	dst->port_num = rdma_ah_get_port_num(src);
	memcpy(&dst->dmac, src->roce.dmac, sizeof(dst->dmac));
}

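/* Map the IB core GID type onto the device's RoCE version flag. */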
u8 ib_gid_type_to_pvrdma(enum ib_gid_type gid_type)
{
	return (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ?
		PVRDMA_GID_TYPE_FLAG_ROCE_V2 :
		PVRDMA_GID_TYPE_FLAG_ROCE_V1;
}