GNU Linux-libre 4.14.254-gnu1
drivers/gpu/drm/i915/selftests/mock_dmabuf.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

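/*
 * Minimal dma-buf exporter backed by plain kernel pages, used by the i915
 * selftests to exercise the driver's dma-buf import paths without depending
 * on another real exporter or on hardware.
 */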
#include "mock_dmabuf.h"

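/*
 * Build a scatterlist covering the mock's backing pages and DMA-map it for
 * the attaching device. dma_map_sg() returns the number of mapped entries
 * and 0 on failure, hence the -ENOMEM below.
 */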
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		err = -ENOMEM;
		goto err_st;
	}

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}

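/* Reverse mock_map_dma_buf(): unmap the scatterlist and free the table. */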
static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
			       struct sg_table *st,
			       enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
	sg_free_table(st);
	kfree(st);
}

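/*
 * Called when the last reference to the dma-buf is dropped: release the
 * backing pages and the mock object itself.
 */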
static void mock_dmabuf_release(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);
	int i;

	for (i = 0; i < mock->npages; i++)
		put_page(mock->pages[i]);

	kfree(mock);
}

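/*
 * Map all backing pages into one contiguous kernel virtual range;
 * vm_map_ram() returns NULL on failure, which the dma-buf core hands back
 * to the caller of dma_buf_vmap().
 */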
static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
}

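/* Tear down the contiguous mapping created by mock_dmabuf_vmap(). */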
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(vaddr, mock->npages);
}

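/* Atomic (non-sleeping) kernel mapping of a single backing page. */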
static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return kmap_atomic(mock->pages[page_num]);
}

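/* Undo mock_dmabuf_kmap_atomic(); kunmap_atomic() only needs the address. */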
static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	kunmap_atomic(addr);
}

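/* Sleepable kernel mapping of a single backing page. */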
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return kmap(mock->pages[page_num]);
}

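/* Undo mock_dmabuf_kmap(); kunmap() takes the page, not the address. */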
static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	kunmap(mock->pages[page_num]);
}

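/* Userspace mmap of the mock buffer is deliberately unsupported. */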
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -ENODEV;
}

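/* Wire the mock callbacks into the dma-buf core. */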
static const struct dma_buf_ops mock_dmabuf_ops = {
	.map_dma_buf = mock_map_dma_buf,
	.unmap_dma_buf = mock_unmap_dma_buf,
	.release = mock_dmabuf_release,
	.map = mock_dmabuf_kmap,
	.map_atomic = mock_dmabuf_kmap_atomic,
	.unmap = mock_dmabuf_kunmap,
	.unmap_atomic = mock_dmabuf_kunmap_atomic,
	.mmap = mock_dmabuf_mmap,
	.vmap = mock_dmabuf_vmap,
	.vunmap = mock_dmabuf_vunmap,
};

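/*
 * Allocate npages individual pages and export them as a dma-buf. On any
 * failure, the pages allocated so far are released; when alloc_page()
 * fails, i already indexes the first unallocated slot, so the
 * while (i--) loop below frees exactly the pages that were obtained.
 */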
static struct dma_buf *mock_dmabuf(int npages)
{
	struct mock_dmabuf *mock;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int i, err = -ENOMEM;

	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
		       GFP_KERNEL);
	if (!mock)
		return ERR_PTR(-ENOMEM);

	mock->npages = npages;
	for (i = 0; i < npages; i++) {
		mock->pages[i] = alloc_page(GFP_KERNEL);
		if (!mock->pages[i])
			goto err;
	}

	exp_info.ops = &mock_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = mock;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		/* propagate dma_buf_export()'s error instead of assuming -ENOMEM */
		err = PTR_ERR(dmabuf);
		goto err;
	}

	return dmabuf;

err:
	while (i--)
		put_page(mock->pages[i]);
	kfree(mock);
	return ERR_PTR(err);
}
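
/*
 * Hypothetical usage sketch (not part of this file): a selftest could
 * export a two-page mock buffer and touch it through the CPU vmap path
 * roughly as follows, using the 4.14-era dma-buf API.
 *
 *	struct dma_buf *dmabuf;
 *	void *vaddr;
 *
 *	dmabuf = mock_dmabuf(2);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	vaddr = dma_buf_vmap(dmabuf);
 *	if (!vaddr) {
 *		dma_buf_put(dmabuf);
 *		return -ENOMEM;
 *	}
 *
 *	memset(vaddr, 0xc5, 2 * PAGE_SIZE);
 *	dma_buf_vunmap(dmabuf, vaddr);
 *	dma_buf_put(dmabuf);
 */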