GNU Linux-libre 4.19.268-gnu1
drivers/gpu/drm/i915/selftests/mock_dmabuf.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "mock_dmabuf.h"

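/*
 * Build a scatterlist covering the mock object's preallocated pages (one
 * entry per page) and DMA-map it for the importer's device.
 */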
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		err = -ENOMEM;
		goto err_st;
	}

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}

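/* Undo mock_map_dma_buf(): unmap the pages and release the sg_table. */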
static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
			       struct sg_table *st,
			       enum dma_data_direction dir)
{
	dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
	sg_free_table(st);
	kfree(st);
}

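/*
 * Called when the last reference to the dma-buf is dropped: put the backing
 * pages and free the mock object itself.
 */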
static void mock_dmabuf_release(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);
	int i;

	for (i = 0; i < mock->npages; i++)
		put_page(mock->pages[i]);

	kfree(mock);
}

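/*
 * Whole-object CPU access: map all backing pages into one contiguous kernel
 * virtual range, and tear that mapping down again.
 */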
static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
}

static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	vm_unmap_ram(vaddr, mock->npages);
}

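/* Per-page CPU access via the dma-buf kmap interface. */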
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return kmap(mock->pages[page_num]);
}

static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	kunmap(mock->pages[page_num]);
}

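/* mmap of the mock object into userspace is not supported. */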
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	return -ENODEV;
}

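/* dma-buf callbacks wiring the mock exporter into the dma-buf core. */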
static const struct dma_buf_ops mock_dmabuf_ops = {
	.map_dma_buf = mock_map_dma_buf,
	.unmap_dma_buf = mock_unmap_dma_buf,
	.release = mock_dmabuf_release,
	.map = mock_dmabuf_kmap,
	.unmap = mock_dmabuf_kunmap,
	.mmap = mock_dmabuf_mmap,
	.vmap = mock_dmabuf_vmap,
	.vunmap = mock_dmabuf_vunmap,
};

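/*
 * Create a mock dma-buf backed by @npages freshly allocated pages.  On
 * failure, any pages already allocated are put again before returning an
 * ERR_PTR.
 */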
static struct dma_buf *mock_dmabuf(int npages)
{
	struct mock_dmabuf *mock;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int i;

	mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
		       GFP_KERNEL);
	if (!mock)
		return ERR_PTR(-ENOMEM);

	mock->npages = npages;
	for (i = 0; i < npages; i++) {
		mock->pages[i] = alloc_page(GFP_KERNEL);
		if (!mock->pages[i])
			goto err;
	}

	exp_info.ops = &mock_dmabuf_ops;
	exp_info.size = npages * PAGE_SIZE;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = mock;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		goto err;

	return dmabuf;

err:
	while (i--)
		put_page(mock->pages[i]);
	kfree(mock);
	return ERR_PTR(-ENOMEM);
}
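
/*
 * Illustrative sketch only, not part of the original file: roughly how a
 * caller could exercise the mock exporter through the generic dma-buf API.
 * The function name below is made up for illustration.
 */
static int __maybe_unused example_mock_dmabuf_usage(void)
{
	struct dma_buf *dmabuf;
	void *vaddr;

	dmabuf = mock_dmabuf(1);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* dma_buf_vmap() ends up in mock_dmabuf_vmap() above. */
	vaddr = dma_buf_vmap(dmabuf);
	if (!vaddr) {
		dma_buf_put(dmabuf);
		return -ENOMEM;
	}
	dma_buf_vunmap(dmabuf, vaddr);

	/* Dropping the last reference ends up in mock_dmabuf_release(). */
	dma_buf_put(dmabuf);
	return 0;
}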