// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018 Google, Inc. */
#include "gasket.h"
#include "gasket_ioctl.h"
#include "gasket_constants.h"
#include "gasket_core.h"
#include "gasket_interrupt.h"
#include "gasket_page_table.h"
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_ioctl.h>
#else
#define trace_gasket_ioctl_entry(x, ...)
#define trace_gasket_ioctl_exit(x)
#define trace_gasket_ioctl_integer_data(x)
#define trace_gasket_ioctl_eventfd_data(x, ...)
#define trace_gasket_ioctl_page_table_data(x, ...)
#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
#endif
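
/*
 * When GASKET_KERNEL_TRACE_SUPPORT is not defined, the trace_gasket_ioctl_*
 * calls used by the handlers below expand to empty macros and compile away,
 * so tracing adds no overhead in that configuration.
 */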

/* Associate an eventfd with an interrupt. */
static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
                               struct gasket_interrupt_eventfd __user *argp)
{
        struct gasket_interrupt_eventfd die;

        if (copy_from_user(&die, argp, sizeof(struct gasket_interrupt_eventfd)))
                return -EFAULT;

        trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);

        return gasket_interrupt_set_eventfd(gasket_dev->interrupt_data,
                                            die.interrupt, die.event_fd);
}
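
/*
 * Illustrative only (not part of the driver): userspace would typically
 * create an eventfd and register it for an interrupt index with something
 * like the following, where fd is an open Gasket device node and irq_index
 * is a placeholder for the interrupt of interest (error handling omitted):
 *
 *      struct gasket_interrupt_eventfd ev = {
 *              .interrupt = irq_index,
 *              .event_fd = eventfd(0, 0),
 *      };
 *      ioctl(fd, GASKET_IOCTL_SET_EVENTFD, &ev);
 */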

/* Read the size of the page table. */
static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
        struct gasket_page_table_ioctl __user *argp)
{
        int ret = 0;
        struct gasket_page_table_ioctl ibuf;

        if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
                return -EFAULT;

        if (ibuf.page_table_index >= gasket_dev->num_page_tables)
                return -EFAULT;

        ibuf.size = gasket_page_table_num_entries(
                gasket_dev->page_table[ibuf.page_table_index]);

        trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
                                           ibuf.host_address,
                                           ibuf.device_address);

        if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
                return -EFAULT;

        return ret;
}

/* Read the size of the simple page table. */
static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
        struct gasket_page_table_ioctl __user *argp)
{
        int ret = 0;
        struct gasket_page_table_ioctl ibuf;

        if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
                return -EFAULT;

        if (ibuf.page_table_index >= gasket_dev->num_page_tables)
                return -EFAULT;

        ibuf.size = gasket_page_table_num_simple_entries(
                gasket_dev->page_table[ibuf.page_table_index]);

        trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
                                           ibuf.host_address,
                                           ibuf.device_address);

        if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
                return -EFAULT;

        return ret;
}

/* Set the boundary between the simple and extended page tables. */
static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
        struct gasket_page_table_ioctl __user *argp)
{
        int ret;
        struct gasket_page_table_ioctl ibuf;
        uint max_page_table_size;

        if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
                return -EFAULT;

        trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
                                           ibuf.host_address,
                                           ibuf.device_address);

        if (ibuf.page_table_index >= gasket_dev->num_page_tables)
                return -EFAULT;

        max_page_table_size = gasket_page_table_max_size(
                gasket_dev->page_table[ibuf.page_table_index]);

        if (ibuf.size > max_page_table_size) {
                dev_dbg(gasket_dev->dev,
                        "Partition request 0x%llx too large, max is 0x%x\n",
                        ibuf.size, max_page_table_size);
                return -EINVAL;
        }

        mutex_lock(&gasket_dev->mutex);

        ret = gasket_page_table_partition(
                gasket_dev->page_table[ibuf.page_table_index], ibuf.size);
        mutex_unlock(&gasket_dev->mutex);

        return ret;
}
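
/*
 * Illustrative only: a caller might repartition table 0 with something like
 *
 *      struct gasket_page_table_ioctl p = {
 *              .page_table_index = 0,
 *              .size = new_boundary,
 *      };
 *      ioctl(fd, GASKET_IOCTL_PARTITION_PAGE_TABLE, &p);
 *
 * where new_boundary is a placeholder. Requests larger than
 * gasket_page_table_max_size() are rejected with -EINVAL, and the actual
 * repartitioning runs under gasket_dev->mutex.
 */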

/* Map a userspace buffer to a device virtual address. */
static int gasket_map_buffers(struct gasket_dev *gasket_dev,
                              struct gasket_page_table_ioctl __user *argp)
{
        struct gasket_page_table_ioctl ibuf;

        if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
                return -EFAULT;

        trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
                                           ibuf.host_address,
                                           ibuf.device_address);

        if (ibuf.page_table_index >= gasket_dev->num_page_tables)
                return -EFAULT;

        if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
                                            ibuf.host_address,
                                            ibuf.device_address, ibuf.size))
                return -EINVAL;

        return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
                                     ibuf.host_address, ibuf.device_address,
                                     ibuf.size / PAGE_SIZE);
}
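
/*
 * Illustrative only: mapping a page-aligned userspace buffer might look like
 *
 *      struct gasket_page_table_ioctl map = {
 *              .page_table_index = 0,
 *              .host_address = (__u64)(uintptr_t)buf,
 *              .device_address = dev_addr,
 *              .size = len,
 *      };
 *      ioctl(fd, GASKET_IOCTL_MAP_BUFFER, &map);
 *
 * where buf, dev_addr and len are placeholders. The size is given in bytes
 * and converted to a page count (ibuf.size / PAGE_SIZE) above, after the
 * addresses have been validated by gasket_page_table_are_addrs_bad().
 */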

/* Unmap a userspace buffer from a device virtual address. */
static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
                                struct gasket_page_table_ioctl __user *argp)
{
        struct gasket_page_table_ioctl ibuf;

        if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
                return -EFAULT;

        trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
                                           ibuf.host_address,
                                           ibuf.device_address);

        if (ibuf.page_table_index >= gasket_dev->num_page_tables)
                return -EFAULT;

        if (gasket_page_table_is_dev_addr_bad(gasket_dev->page_table[ibuf.page_table_index],
                                              ibuf.device_address, ibuf.size))
                return -EINVAL;

        gasket_page_table_unmap(gasket_dev->page_table[ibuf.page_table_index],
                                ibuf.device_address, ibuf.size / PAGE_SIZE);

        return 0;
}

/*
 * Reserve structures for coherent allocation, and allocate or free the
 * corresponding memory.
 */
static int gasket_config_coherent_allocator(struct gasket_dev *gasket_dev,
        struct gasket_coherent_alloc_config_ioctl __user *argp)
{
        int ret;
        struct gasket_coherent_alloc_config_ioctl ibuf;

        if (copy_from_user(&ibuf, argp,
                           sizeof(struct gasket_coherent_alloc_config_ioctl)))
                return -EFAULT;

        trace_gasket_ioctl_config_coherent_allocator(ibuf.enable, ibuf.size,
                                                     ibuf.dma_address);

        if (ibuf.page_table_index >= gasket_dev->num_page_tables)
                return -EFAULT;

        if (ibuf.size > PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
                return -ENOMEM;

        if (ibuf.enable == 0) {
                ret = gasket_free_coherent_memory(gasket_dev, ibuf.size,
                                                  ibuf.dma_address,
                                                  ibuf.page_table_index);
        } else {
                ret = gasket_alloc_coherent_memory(gasket_dev, ibuf.size,
                                                   &ibuf.dma_address,
                                                   ibuf.page_table_index);
        }
        if (ret)
                return ret;

        if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
                return -EFAULT;

        return 0;
}
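
/*
 * Note on gasket_config_coherent_allocator(): ibuf.enable selects the
 * direction of the request; zero frees the coherent region identified by
 * ibuf.dma_address, while non-zero allocates up to
 * PAGE_SIZE * MAX_NUM_COHERENT_PAGES bytes and writes the resulting DMA
 * address back to userspace through the same struct.
 */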

/* Check permissions for Gasket ioctls. */
static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
{
        bool alive;
        bool read, write;
        struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;

        alive = (gasket_dev->status == GASKET_STATUS_ALIVE);
        if (!alive)
                dev_dbg(gasket_dev->dev, "%s alive %d status %d\n",
                        __func__, alive, gasket_dev->status);

        read = !!(filp->f_mode & FMODE_READ);
        write = !!(filp->f_mode & FMODE_WRITE);

        switch (cmd) {
        case GASKET_IOCTL_RESET:
        case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
                return write;

        case GASKET_IOCTL_PAGE_TABLE_SIZE:
        case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
        case GASKET_IOCTL_NUMBER_PAGE_TABLES:
                return read;

        case GASKET_IOCTL_PARTITION_PAGE_TABLE:
        case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
                return alive && write;

        case GASKET_IOCTL_MAP_BUFFER:
        case GASKET_IOCTL_UNMAP_BUFFER:
                return alive && write;

        case GASKET_IOCTL_CLEAR_EVENTFD:
        case GASKET_IOCTL_SET_EVENTFD:
                return alive && write;
        }

        return false; /* unknown permissions */
}
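
/*
 * Summary of the default policy in gasket_ioctl_check_permissions(): size and
 * count queries only require the file to be readable, reset and clearing of
 * interrupt counts only require it to be writable, and operations that change
 * page tables, eventfds, or the coherent allocator additionally require the
 * device to be GASKET_STATUS_ALIVE. Unknown commands are always refused.
 */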

/*
 * Standard ioctl dispatch function.
 * @filp: File structure pointer describing this node usage session.
 * @cmd: ioctl number to handle.
 * @argp: ioctl-specific data pointer.
 *
 * Forwards supported operations to the individual handlers above.
 */
long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
{
        struct gasket_dev *gasket_dev;
        unsigned long arg = (unsigned long)argp;
        gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
        int retval;

        gasket_dev = (struct gasket_dev *)filp->private_data;
        trace_gasket_ioctl_entry(gasket_dev->dev_info.name, cmd);

        ioctl_permissions_cb = gasket_get_ioctl_permissions_cb(gasket_dev);
        if (ioctl_permissions_cb) {
                retval = ioctl_permissions_cb(filp, cmd, argp);
                if (retval < 0) {
                        trace_gasket_ioctl_exit(retval);
                        return retval;
                } else if (retval == 0) {
                        trace_gasket_ioctl_exit(-EPERM);
                        return -EPERM;
                }
        } else if (!gasket_ioctl_check_permissions(filp, cmd)) {
                trace_gasket_ioctl_exit(-EPERM);
                dev_dbg(gasket_dev->dev, "ioctl cmd=%x noperm\n", cmd);
                return -EPERM;
        }

        /* Tracing happens in this switch statement for all ioctls with
         * an integer argument; for ioctls with a struct argument that
         * needs copying and decoding, tracing is done within the
         * handler call.
         */
        switch (cmd) {
        case GASKET_IOCTL_RESET:
                retval = gasket_reset(gasket_dev);
                break;
        case GASKET_IOCTL_SET_EVENTFD:
                retval = gasket_set_event_fd(gasket_dev, argp);
                break;
        case GASKET_IOCTL_CLEAR_EVENTFD:
                trace_gasket_ioctl_integer_data(arg);
                retval =
                        gasket_interrupt_clear_eventfd(gasket_dev->interrupt_data,
                                                       (int)arg);
                break;
        case GASKET_IOCTL_PARTITION_PAGE_TABLE:
                trace_gasket_ioctl_integer_data(arg);
                retval = gasket_partition_page_table(gasket_dev, argp);
                break;
        case GASKET_IOCTL_NUMBER_PAGE_TABLES:
                trace_gasket_ioctl_integer_data(gasket_dev->num_page_tables);
                if (copy_to_user(argp, &gasket_dev->num_page_tables,
                                 sizeof(uint64_t)))
                        retval = -EFAULT;
                else
                        retval = 0;
                break;
        case GASKET_IOCTL_PAGE_TABLE_SIZE:
                retval = gasket_read_page_table_size(gasket_dev, argp);
                break;
        case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
                retval = gasket_read_simple_page_table_size(gasket_dev, argp);
                break;
        case GASKET_IOCTL_MAP_BUFFER:
                retval = gasket_map_buffers(gasket_dev, argp);
                break;
        case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
                retval = gasket_config_coherent_allocator(gasket_dev, argp);
                break;
        case GASKET_IOCTL_UNMAP_BUFFER:
                retval = gasket_unmap_buffers(gasket_dev, argp);
                break;
        case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
                /* Clear interrupt counts doesn't take an arg, so use 0. */
                trace_gasket_ioctl_integer_data(0);
                retval = gasket_interrupt_reset_counts(gasket_dev);
                break;
        default:
                /* If we don't understand the ioctl, the best we can do is trace
                 * the arg.
                 */
                trace_gasket_ioctl_integer_data(arg);
                dev_dbg(gasket_dev->dev,
                        "Unknown ioctl cmd=0x%x not caught by gasket_is_supported_ioctl\n",
                        cmd);
                retval = -EINVAL;
                break;
        }

        trace_gasket_ioctl_exit(retval);
        return retval;
}
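
/*
 * Note on gasket_handle_ioctl(): when a device registers its own
 * ioctl_permissions_cb, that callback replaces the default check entirely;
 * a negative return value is passed through as the ioctl result, zero is
 * mapped to -EPERM, and any positive value lets the command proceed.
 */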

/*
 * Determines if an ioctl is part of the standard Gasket framework.
 * @cmd: The ioctl number to handle.
 *
 * Returns 1 if the ioctl is supported and 0 otherwise.
 */
long gasket_is_supported_ioctl(uint cmd)
{
        switch (cmd) {
        case GASKET_IOCTL_RESET:
        case GASKET_IOCTL_SET_EVENTFD:
        case GASKET_IOCTL_CLEAR_EVENTFD:
        case GASKET_IOCTL_PARTITION_PAGE_TABLE:
        case GASKET_IOCTL_NUMBER_PAGE_TABLES:
        case GASKET_IOCTL_PAGE_TABLE_SIZE:
        case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
        case GASKET_IOCTL_MAP_BUFFER:
        case GASKET_IOCTL_UNMAP_BUFFER:
        case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
        case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
                return 1;
        default:
                return 0;
        }
}