1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2018 Google, Inc. */
4 #include "gasket_ioctl.h"
5 #include "gasket_constants.h"
6 #include "gasket_core.h"
7 #include "gasket_interrupt.h"
8 #include "gasket_page_table.h"
9 #include <linux/compiler.h>
10 #include <linux/device.h>
12 #include <linux/uaccess.h>
#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_ioctl.h>
#else
/* Tracing disabled: stub out all tracepoint invocations as no-ops. */
#define trace_gasket_ioctl_entry(x, ...)
#define trace_gasket_ioctl_exit(x)
#define trace_gasket_ioctl_integer_data(x)
#define trace_gasket_ioctl_eventfd_data(x, ...)
#define trace_gasket_ioctl_page_table_data(x, ...)
#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
#endif
26 /* Associate an eventfd with an interrupt. */
27 static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
28 struct gasket_interrupt_eventfd __user *argp)
30 struct gasket_interrupt_eventfd die;
32 if (copy_from_user(&die, argp, sizeof(struct gasket_interrupt_eventfd)))
35 trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
37 return gasket_interrupt_set_eventfd(gasket_dev->interrupt_data,
38 die.interrupt, die.event_fd);
41 /* Read the size of the page table. */
42 static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
43 struct gasket_page_table_ioctl __user *argp)
46 struct gasket_page_table_ioctl ibuf;
48 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
51 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
54 ibuf.size = gasket_page_table_num_entries(
55 gasket_dev->page_table[ibuf.page_table_index]);
57 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
61 if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
67 /* Read the size of the simple page table. */
68 static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
69 struct gasket_page_table_ioctl __user *argp)
72 struct gasket_page_table_ioctl ibuf;
74 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
77 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
81 gasket_page_table_num_simple_entries(gasket_dev->page_table[ibuf.page_table_index]);
83 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
87 if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
93 /* Set the boundary between the simple and extended page tables. */
94 static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
95 struct gasket_page_table_ioctl __user *argp)
98 struct gasket_page_table_ioctl ibuf;
99 uint max_page_table_size;
101 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
104 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
106 ibuf.device_address);
108 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
110 max_page_table_size = gasket_page_table_max_size(
111 gasket_dev->page_table[ibuf.page_table_index]);
113 if (ibuf.size > max_page_table_size) {
114 dev_dbg(gasket_dev->dev,
115 "Partition request 0x%llx too large, max is 0x%x\n",
116 ibuf.size, max_page_table_size);
120 mutex_lock(&gasket_dev->mutex);
122 ret = gasket_page_table_partition(
123 gasket_dev->page_table[ibuf.page_table_index], ibuf.size);
124 mutex_unlock(&gasket_dev->mutex);
129 /* Map a userspace buffer to a device virtual address. */
130 static int gasket_map_buffers(struct gasket_dev *gasket_dev,
131 struct gasket_page_table_ioctl __user *argp)
133 struct gasket_page_table_ioctl ibuf;
135 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
138 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
140 ibuf.device_address);
142 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
145 if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
147 ibuf.device_address, ibuf.size))
150 return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
151 ibuf.host_address, ibuf.device_address,
152 ibuf.size / PAGE_SIZE);
155 /* Unmap a userspace buffer from a device virtual address. */
156 static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
157 struct gasket_page_table_ioctl __user *argp)
159 struct gasket_page_table_ioctl ibuf;
161 if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
164 trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
166 ibuf.device_address);
168 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
171 if (gasket_page_table_is_dev_addr_bad(gasket_dev->page_table[ibuf.page_table_index],
172 ibuf.device_address, ibuf.size))
175 gasket_page_table_unmap(gasket_dev->page_table[ibuf.page_table_index],
176 ibuf.device_address, ibuf.size / PAGE_SIZE);
182 * Reserve structures for coherent allocation, and allocate or free the
183 * corresponding memory.
185 static int gasket_config_coherent_allocator(struct gasket_dev *gasket_dev,
186 struct gasket_coherent_alloc_config_ioctl __user *argp)
189 struct gasket_coherent_alloc_config_ioctl ibuf;
191 if (copy_from_user(&ibuf, argp,
192 sizeof(struct gasket_coherent_alloc_config_ioctl)))
195 trace_gasket_ioctl_config_coherent_allocator(ibuf.enable, ibuf.size,
198 if (ibuf.page_table_index >= gasket_dev->num_page_tables)
201 if (ibuf.size > PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
204 if (ibuf.enable == 0) {
205 ret = gasket_free_coherent_memory(gasket_dev, ibuf.size,
207 ibuf.page_table_index);
209 ret = gasket_alloc_coherent_memory(gasket_dev, ibuf.size,
211 ibuf.page_table_index);
215 if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
221 /* Check permissions for Gasket ioctls. */
222 static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
226 struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
228 alive = (gasket_dev->status == GASKET_STATUS_ALIVE);
230 dev_dbg(gasket_dev->dev, "%s alive %d status %d\n",
231 __func__, alive, gasket_dev->status);
233 read = !!(filp->f_mode & FMODE_READ);
234 write = !!(filp->f_mode & FMODE_WRITE);
237 case GASKET_IOCTL_RESET:
238 case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
241 case GASKET_IOCTL_PAGE_TABLE_SIZE:
242 case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
243 case GASKET_IOCTL_NUMBER_PAGE_TABLES:
246 case GASKET_IOCTL_PARTITION_PAGE_TABLE:
247 case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
248 return alive && write;
250 case GASKET_IOCTL_MAP_BUFFER:
251 case GASKET_IOCTL_UNMAP_BUFFER:
252 return alive && write;
254 case GASKET_IOCTL_CLEAR_EVENTFD:
255 case GASKET_IOCTL_SET_EVENTFD:
256 return alive && write;
259 return false; /* unknown permissions */
263 * standard ioctl dispatch function.
264 * @filp: File structure pointer describing this node usage session.
265 * @cmd: ioctl number to handle.
266 * @argp: ioctl-specific data pointer.
268 * Standard ioctl dispatcher; forwards operations to individual handlers.
270 long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
272 struct gasket_dev *gasket_dev;
273 unsigned long arg = (unsigned long)argp;
274 gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
277 gasket_dev = (struct gasket_dev *)filp->private_data;
278 trace_gasket_ioctl_entry(gasket_dev->dev_info.name, cmd);
280 ioctl_permissions_cb = gasket_get_ioctl_permissions_cb(gasket_dev);
281 if (ioctl_permissions_cb) {
282 retval = ioctl_permissions_cb(filp, cmd, argp);
284 trace_gasket_ioctl_exit(retval);
286 } else if (retval == 0) {
287 trace_gasket_ioctl_exit(-EPERM);
290 } else if (!gasket_ioctl_check_permissions(filp, cmd)) {
291 trace_gasket_ioctl_exit(-EPERM);
292 dev_dbg(gasket_dev->dev, "ioctl cmd=%x noperm\n", cmd);
296 /* Tracing happens in this switch statement for all ioctls with
297 * an integer argrument, but ioctls with a struct argument
298 * that needs copying and decoding, that tracing is done within
302 case GASKET_IOCTL_RESET:
303 retval = gasket_reset(gasket_dev);
305 case GASKET_IOCTL_SET_EVENTFD:
306 retval = gasket_set_event_fd(gasket_dev, argp);
308 case GASKET_IOCTL_CLEAR_EVENTFD:
309 trace_gasket_ioctl_integer_data(arg);
311 gasket_interrupt_clear_eventfd(gasket_dev->interrupt_data,
314 case GASKET_IOCTL_PARTITION_PAGE_TABLE:
315 trace_gasket_ioctl_integer_data(arg);
316 retval = gasket_partition_page_table(gasket_dev, argp);
318 case GASKET_IOCTL_NUMBER_PAGE_TABLES:
319 trace_gasket_ioctl_integer_data(gasket_dev->num_page_tables);
320 if (copy_to_user(argp, &gasket_dev->num_page_tables,
326 case GASKET_IOCTL_PAGE_TABLE_SIZE:
327 retval = gasket_read_page_table_size(gasket_dev, argp);
329 case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
330 retval = gasket_read_simple_page_table_size(gasket_dev, argp);
332 case GASKET_IOCTL_MAP_BUFFER:
333 retval = gasket_map_buffers(gasket_dev, argp);
335 case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
336 retval = gasket_config_coherent_allocator(gasket_dev, argp);
338 case GASKET_IOCTL_UNMAP_BUFFER:
339 retval = gasket_unmap_buffers(gasket_dev, argp);
341 case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
342 /* Clear interrupt counts doesn't take an arg, so use 0. */
343 trace_gasket_ioctl_integer_data(0);
344 retval = gasket_interrupt_reset_counts(gasket_dev);
347 /* If we don't understand the ioctl, the best we can do is trace
350 trace_gasket_ioctl_integer_data(arg);
351 dev_dbg(gasket_dev->dev,
352 "Unknown ioctl cmd=0x%x not caught by gasket_is_supported_ioctl\n",
358 trace_gasket_ioctl_exit(retval);
363 * Determines if an ioctl is part of the standard Gasket framework.
364 * @cmd: The ioctl number to handle.
366 * Returns 1 if the ioctl is supported and 0 otherwise.
368 long gasket_is_supported_ioctl(uint cmd)
371 case GASKET_IOCTL_RESET:
372 case GASKET_IOCTL_SET_EVENTFD:
373 case GASKET_IOCTL_CLEAR_EVENTFD:
374 case GASKET_IOCTL_PARTITION_PAGE_TABLE:
375 case GASKET_IOCTL_NUMBER_PAGE_TABLES:
376 case GASKET_IOCTL_PAGE_TABLE_SIZE:
377 case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
378 case GASKET_IOCTL_MAP_BUFFER:
379 case GASKET_IOCTL_UNMAP_BUFFER:
380 case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
381 case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR: