// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>

#include "uverbs.h"

MODULE_IMPORT_NS(DMA_BUF);

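/**
 * ib_umem_dmabuf_map_pages - map a dma-buf umem for DMA
 * @umem_dmabuf: the umem to map
 *
 * Maps the dma-buf attachment (unless it is mapped already), trims the
 * resulting sg list in place so it covers exactly the pages spanned by
 * the umem, and waits on the exporter's kernel fences so the page
 * contents are up to date before the device uses them.
 *
 * The caller must hold the dma-buf reservation lock.
 */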
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	long ret;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach,
				     DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
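	/*
	 * Walk the sg list once: "cur" tracks the byte offset of the
	 * current entry within the mapped buffer, and "nmap" counts the
	 * entries overlapping [start, end) so the trimmed list length
	 * is known when the walk finishes.
	 */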
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
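		/*
		 * The entry containing "start" becomes the head of the
		 * trimmed list: advance its DMA address past the bytes
		 * preceding the umem and remember the adjustment so it
		 * can be undone at unmap time.
		 */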
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
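		/*
		 * The entry containing "end" becomes the tail: shrink it
		 * so it stops at the umem boundary, remember the trimmed
		 * length for unmap, and stop walking.
		 */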
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}

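	/*
	 * Publish the trimmed list through the umem, so generic ib_umem
	 * users iterate only over the pages backing the requested range.
	 */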
	umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sgt_append.sgt.nents = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the contents of the pages
	 * may not be up-to-date. Wait for the exporter to finish
	 * the migration.
	 */
	ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;
	if (ret == 0)
		return -ETIMEDOUT;
	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);

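/**
 * ib_umem_dmabuf_unmap_pages - unmap a dma-buf umem
 * @umem_dmabuf: the umem to unmap
 *
 * Undoes the head/tail trimming applied by ib_umem_dmabuf_map_pages()
 * and releases the attachment mapping. A no-op if the umem is not
 * currently mapped.
 *
 * The caller must hold the dma-buf reservation lock.
 */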
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);

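/**
 * ib_umem_dmabuf_get - create an ib_umem backed by a dma-buf
 * @device: IB device the umem is registered against
 * @offset: byte offset of the region within the dma-buf
 * @size: length of the region in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the umem
 * @ops: importer attach ops; move_notify is mandatory so the importer
 *       can be told when the exporter relocates the buffer
 *
 * Returns the new umem on success or an ERR_PTR on failure. The buffer
 * is attached but not yet mapped; the caller maps it with
 * ib_umem_dmabuf_map_pages() under the reservation lock.
 */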
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;

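	/*
	 * A dynamic attachment keeps the buffer movable: the exporter
	 * calls ops->move_notify (with umem_dmabuf as importer_priv)
	 * whenever it is about to relocate the pages, so the importer
	 * can invalidate its mapping and remap later.
	 */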
	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);

out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);

static void
ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
{
	struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;

	ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
			       "Invalidate callback should not be called when memory is pinned\n");
}

static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
	.allow_peer2peer = true,
	.move_notify = ib_umem_dmabuf_unsupported_move_notify,
};

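/**
 * ib_umem_dmabuf_get_pinned - create a pinned, mapped dma-buf umem
 * @device: IB device the umem is registered against
 * @offset: byte offset of the region within the dma-buf
 * @size: length of the region in bytes
 * @fd: file descriptor of the dma-buf
 * @access: IB_ACCESS_* flags for the umem
 *
 * Unlike ib_umem_dmabuf_get(), the buffer is pinned and mapped before
 * returning, so the exporter can never move it and move_notify is not
 * expected to fire. Intended for devices that cannot handle on-demand
 * invalidation.
 *
 * Illustrative caller sketch (not taken from an in-tree driver):
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, len,
 *						fd, access);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	...
 *	ib_umem_release(&umem_dmabuf->umem);
 */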
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access)
{
	struct ib_umem_dmabuf *umem_dmabuf;
	int err;

	umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
					 &ib_umem_dmabuf_attach_pinned_ops);
	if (IS_ERR(umem_dmabuf))
		return umem_dmabuf;

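	/*
	 * Pin and map under the reservation lock: once dma_buf_pin()
	 * succeeds the exporter may no longer relocate the buffer, so
	 * the mapping set up here stays valid for the umem's lifetime.
	 */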
	dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
	err = dma_buf_pin(umem_dmabuf->attach);
	if (err)
		goto err_release;
	umem_dmabuf->pinned = 1;

	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
	if (err)
		goto err_unpin;
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);

	return umem_dmabuf;

err_unpin:
	dma_buf_unpin(umem_dmabuf->attach);
err_release:
	dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
	ib_umem_release(&umem_dmabuf->umem);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);

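/**
 * ib_umem_dmabuf_release - tear down a dma-buf umem
 * @umem_dmabuf: the umem to release
 *
 * Unmaps and (if pinned) unpins the buffer, detaches from the dma-buf
 * and drops the file reference taken in ib_umem_dmabuf_get(). Reached
 * via ib_umem_release() for dma-buf backed umems.
 */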
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	if (umem_dmabuf->pinned)
		dma_buf_unpin(umem_dmabuf->attach);
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}