// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/poll.h>
#include <linux/pid.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmu_context.h>
#include <asm/cputable.h>
#include <asm/current.h>
#include <asm/copro.h>

#include "cxl.h"
#include "trace.h"
#define CXL_NUM_MINORS 256 /* Total to reserve */

#define CXL_AFU_MINOR_D(afu) (CXL_CARD_MINOR(afu->adapter) + 1 + (3 * afu->slice))
#define CXL_AFU_MINOR_M(afu) (CXL_AFU_MINOR_D(afu) + 1)
#define CXL_AFU_MINOR_S(afu) (CXL_AFU_MINOR_D(afu) + 2)
#define CXL_AFU_MKDEV_D(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_D(afu))
#define CXL_AFU_MKDEV_M(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_M(afu))
#define CXL_AFU_MKDEV_S(afu) MKDEV(MAJOR(cxl_dev), CXL_AFU_MINOR_S(afu))

#define CXL_DEVT_AFU(dev) ((MINOR(dev) % CXL_DEV_MINORS - 1) / 3)

#define CXL_DEVT_IS_CARD(dev) (MINOR(dev) % CXL_DEV_MINORS == 0)
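/*
 * Worked example of the minor-number layout. CXL_DEV_MINORS (defined in
 * cxl.h alongside CXL_CARD_MINOR() and CXL_DEVT_ADAPTER()) is 13: one
 * control minor plus 4 AFUs x 3 modes per card. For card 0:
 *
 *   minor 0       card0 control (reserved)
 *   minor 1/2/3   afu0.0d / afu0.0m / afu0.0s
 *   minor 4/5/6   afu0.1d / afu0.1m / afu0.1s
 *   ...
 *
 * Card 1 starts at minor 13. CXL_DEVT_AFU() inverts the mapping, e.g. for
 * minor 5: (5 % 13 - 1) / 3 = 1, i.e. AFU slice 1.
 */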
static dev_t cxl_dev;

static int __afu_open(struct inode *inode, struct file *file, bool master)
{
	struct cxl *adapter;
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	int slice = CXL_DEVT_AFU(inode->i_rdev);
	int rc = -ENODEV;

	pr_devel("afu_open afu%i.%i\n", adapter_num, slice);

	if (!(adapter = get_cxl_adapter(adapter_num)))
		return -ENODEV;

	if (slice > adapter->slices)
		goto err_put_adapter;

	spin_lock(&adapter->afu_list_lock);
	if (!(afu = adapter->afu[slice])) {
		spin_unlock(&adapter->afu_list_lock);
		goto err_put_adapter;
	}

	/*
	 * Take a ref to the afu so that it doesn't go away for the rest of
	 * the function. This ref is released before we return.
	 */
	cxl_afu_get(afu);
	spin_unlock(&adapter->afu_list_lock);

	if (!afu->current_mode)
		goto err_put_afu;

	if (!cxl_ops->link_ok(adapter, afu)) {
		rc = -EIO;
		goto err_put_afu;
	}

	if (!(ctx = cxl_context_alloc())) {
		rc = -ENOMEM;
		goto err_put_afu;
	}

	rc = cxl_context_init(ctx, afu, master);
	if (rc)
		goto err_put_afu;

	cxl_context_set_mapping(ctx, inode->i_mapping);

	pr_devel("afu_open pe: %i\n", ctx->pe);
	file->private_data = ctx;

	/* indicate success */
	rc = 0;

err_put_afu:
	/* release the ref taken earlier */
	cxl_afu_put(afu);
err_put_adapter:
	put_device(&adapter->dev);
	return rc;
}
int afu_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, false);
}

static int afu_master_open(struct inode *inode, struct file *file)
{
	return __afu_open(inode, file, true);
}
int afu_release(struct inode *inode, struct file *file)
{
	struct cxl_context *ctx = file->private_data;

	pr_devel("%s: closing cxl file descriptor. pe: %i\n",
		 __func__, ctx->pe);
	cxl_context_detach(ctx);

	/*
	 * Delete the context's mapping pointer, unless it's created by the
	 * kernel API, in which case leave it so it can be freed by reclaim_ctx()
	 */
	if (!ctx->kernelapi) {
		mutex_lock(&ctx->mapping_lock);
		ctx->mapping = NULL;
		mutex_unlock(&ctx->mapping_lock);
	}

	/*
	 * At this point all bottom halves have finished and we should be
	 * getting no more IRQs from the hardware for this context. Once it's
	 * removed from the IDR (and RCU synchronised) it's safe to free the
	 * context object.
	 */
	cxl_context_free(ctx);

	return 0;
}
static long afu_ioctl_start_work(struct cxl_context *ctx,
				 struct cxl_ioctl_start_work __user *uwork)
{
	struct cxl_ioctl_start_work work;
	u64 amr = 0;
	int rc;

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	/* Do this outside the status_mutex to avoid a circular dependency with
	 * the locking in cxl_mmap_fault() */
	if (copy_from_user(&work, uwork, sizeof(work)))
		return -EFAULT;

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	/*
	 * If any of the reserved fields are set or any of the unused
	 * flags are set it's invalid
	 */
	if (work.reserved1 || work.reserved2 || work.reserved3 ||
	    work.reserved4 || work.reserved5 ||
	    (work.flags & ~CXL_START_WORK_ALL)) {
		rc = -EINVAL;
		goto out;
	}

	if (!(work.flags & CXL_START_WORK_NUM_IRQS))
		work.num_interrupts = ctx->afu->pp_irqs;
	else if ((work.num_interrupts < ctx->afu->pp_irqs) ||
		 (work.num_interrupts > ctx->afu->irqs_max)) {
		rc = -EINVAL;
		goto out;
	}

	if ((rc = afu_register_irqs(ctx, work.num_interrupts)))
		goto out;

	if (work.flags & CXL_START_WORK_AMR)
		amr = work.amr & mfspr(SPRN_UAMOR);

	if (work.flags & CXL_START_WORK_TID)
		ctx->assign_tidr = true;

	ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);

	/*
	 * Increment the mapped context count for adapter. This also checks
	 * if adapter_context_lock is taken.
	 */
	rc = cxl_adapter_context_get(ctx->afu->adapter);
	if (rc) {
		afu_release_irqs(ctx, ctx);
		goto out;
	}

	/*
	 * We grab the PID here and not in the file open to allow for the case
	 * where a process (master, some daemon, etc) has opened the chardev on
	 * behalf of another process, so the AFU's mm gets bound to the process
	 * that performs this ioctl and not the process that opened the file.
	 * We also take a reference on the task's mm below, so the mm remains
	 * accessible even if the task that performed the attach exits.
	 */
	ctx->pid = get_task_pid(current, PIDTYPE_PID);

	/* acquire a reference to the task's mm */
	ctx->mm = get_task_mm(current);

	/* ensure this mm_struct can't be freed */
	cxl_context_mm_count_get(ctx);

	if (ctx->mm) {
		/* decrement the use count from above */
		mmput(ctx->mm);
		/* make TLBIs for this context global */
		mm_context_add_copro(ctx->mm);
	}

	/*
	 * Increment driver use count. Enables global TLBIs for hash
	 * and callbacks to handle the segment table
	 */
	cxl_ctx_get();

	/*
	 * A barrier is needed to make sure all TLBIs are global
	 * before we attach and the context starts being used by the
	 * adapter.
	 *
	 * Needed after mm_context_add_copro() for radix and
	 * cxl_ctx_get() for hash/p8.
	 *
	 * The barrier should really be mb(), since it involves a
	 * device. However, it's only useful when we have local
	 * vs. global TLBIs, i.e. SMP=y. So keep smp_mb().
	 */
	smp_mb();

	trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);

	if ((rc = cxl_ops->attach_process(ctx, false, work.work_element_descriptor,
					  amr))) {
		afu_release_irqs(ctx, ctx);
		cxl_adapter_context_put(ctx->afu->adapter);
		put_pid(ctx->pid);
		ctx->pid = NULL;
		/* drop the mm count and global TLBI state taken above */
		cxl_context_mm_count_put(ctx);
		if (ctx->mm)
			mm_context_remove_copro(ctx->mm);
		goto out;
	}

	rc = 0;
	if (work.flags & CXL_START_WORK_TID) {
		work.tid = ctx->tidr;
		if (copy_to_user(uwork, &work, sizeof(work)))
			rc = -EFAULT;
	}

	ctx->status = STARTED;

out:
	mutex_unlock(&ctx->status_mutex);
	return rc;
}
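/*
 * Illustrative userspace sketch of the attach flow above (not part of the
 * driver; assumes the uapi definitions from include/uapi/misc/cxl.h and an
 * AFU-specific work element descriptor `wed`):
 *
 *   int fd = open("/dev/cxl/afu0.0s", O_RDWR);
 *   struct cxl_ioctl_start_work work = { 0 };
 *   work.work_element_descriptor = wed;
 *   // flags == 0: the driver picks pp_irqs interrupts, no AMR or TID
 *   if (ioctl(fd, CXL_IOCTL_START_WORK, &work) == -1)
 *       err(1, "CXL_IOCTL_START_WORK");
 *
 * On success the context is STARTED: the calling process's mm is bound to
 * the AFU context and the MMIO space can then be mapped with mmap().
 */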
static long afu_ioctl_process_element(struct cxl_context *ctx,
				      __u32 __user *upe)
{
	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	if (copy_to_user(upe, &ctx->external_pe, sizeof(__u32)))
		return -EFAULT;

	return 0;
}
static long afu_ioctl_get_afu_id(struct cxl_context *ctx,
				 struct cxl_afu_id __user *upafuid)
{
	struct cxl_afu_id afuid = { 0 };

	afuid.card_id = ctx->afu->adapter->adapter_num;
	afuid.afu_offset = ctx->afu->slice;
	afuid.afu_mode = ctx->afu->current_mode;

	/* set the flag bit in case the afu is a slave */
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED && !ctx->master)
		afuid.flags |= CXL_AFUID_FLAG_SLAVE;

	if (copy_to_user(upafuid, &afuid, sizeof(afuid)))
		return -EFAULT;

	return 0;
}
long afu_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl_context *ctx = file->private_data;

	if (ctx->status == CLOSED)
		return -EIO;

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	pr_devel("afu_ioctl\n");
	switch (cmd) {
	case CXL_IOCTL_START_WORK:
		return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg);
	case CXL_IOCTL_GET_PROCESS_ELEMENT:
		return afu_ioctl_process_element(ctx, (__u32 __user *)arg);
	case CXL_IOCTL_GET_AFU_ID:
		return afu_ioctl_get_afu_id(ctx, (struct cxl_afu_id __user *)
					    arg);
	}
	return -EINVAL;
}
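/*
 * Illustrative userspace sketch of the query ioctls (not part of the
 * driver; assumes the uapi definitions from include/uapi/misc/cxl.h):
 *
 *   __u32 pe;
 *   ioctl(fd, CXL_IOCTL_GET_PROCESS_ELEMENT, &pe);
 *
 *   struct cxl_afu_id afuid;
 *   ioctl(fd, CXL_IOCTL_GET_AFU_ID, &afuid);
 *   // afuid.card_id / afuid.afu_offset identify the AFU;
 *   // CXL_AFUID_FLAG_SLAVE is set for a non-master context on a
 *   // directed-mode AFU.
 */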
static long afu_compat_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	return afu_ioctl(file, cmd, arg);
}
int afu_mmap(struct file *file, struct vm_area_struct *vm)
{
	struct cxl_context *ctx = file->private_data;

	/* AFU must be started before we can MMIO */
	if (ctx->status != STARTED)
		return -EIO;

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	return cxl_context_iomap(ctx, vm);
}
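/*
 * Illustrative userspace sketch (not part of the driver): once the context
 * is STARTED, the per-process MMIO space can be mapped. The mappable size
 * is AFU-specific; 4096 below is just an assumed example length.
 *
 *   void *mmio = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 */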
static inline bool ctx_event_pending(struct cxl_context *ctx)
{
	if (ctx->pending_irq || ctx->pending_fault || ctx->pending_afu_err)
		return true;

	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events))
		return true;

	return false;
}
__poll_t afu_poll(struct file *file, struct poll_table_struct *poll)
{
	struct cxl_context *ctx = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	poll_wait(file, &ctx->wq, poll);

	pr_devel("afu_poll wait done pe: %i\n", ctx->pe);

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx_event_pending(ctx))
		mask |= EPOLLIN | EPOLLRDNORM;
	else if (ctx->status == CLOSED)
		/* Only error on closed when there are no further events pending */
		mask |= EPOLLERR;
	spin_unlock_irqrestore(&ctx->lock, flags);

	pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask);

	return mask;
}
static ssize_t afu_driver_event_copy(struct cxl_context *ctx,
				     char __user *buf,
				     struct cxl_event *event,
				     struct cxl_event_afu_driver_reserved *pl)
{
	/* Check event */
	if (!pl) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
		return -EFAULT;
	}

	/* Check event size */
	event->header.size += pl->data_size;
	if (event->header.size > CXL_READ_MIN_SIZE) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EINVAL);
		return -EFAULT;
	}

	/* Copy event header */
	if (copy_to_user(buf, event, sizeof(struct cxl_event_header))) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
		return -EFAULT;
	}

	/* Copy event data */
	buf += sizeof(struct cxl_event_header);
	if (copy_to_user(buf, &pl->data, pl->data_size)) {
		ctx->afu_driver_ops->event_delivered(ctx, pl, -EFAULT);
		return -EFAULT;
	}

	ctx->afu_driver_ops->event_delivered(ctx, pl, 0); /* Success */
	return event->header.size;
}
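/*
 * Layout written to the user buffer by the function above:
 *
 *   +-------------------------+  buf
 *   | struct cxl_event_header |  (type = CXL_EVENT_AFU_DRIVER)
 *   +-------------------------+  buf + sizeof(struct cxl_event_header)
 *   | pl->data[0..data_size)  |  AFU-driver-specific payload
 *   +-------------------------+
 *
 * header.size covers both parts and must not exceed CXL_READ_MIN_SIZE,
 * which afu_read() has already verified fits in the caller's buffer.
 */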
ssize_t afu_read(struct file *file, char __user *buf, size_t count,
		 loff_t *off)
{
	struct cxl_context *ctx = file->private_data;
	struct cxl_event_afu_driver_reserved *pl = NULL;
	struct cxl_event event;
	unsigned long flags;
	int rc;
	DEFINE_WAIT(wait);

	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		return -EIO;

	if (count < CXL_READ_MIN_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&ctx->lock, flags);

	for (;;) {
		prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
		if (ctx_event_pending(ctx) || (ctx->status == CLOSED))
			break;

		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			rc = -EIO;
			goto out;
		}

		if (file->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			goto out;
		}

		if (signal_pending(current)) {
			rc = -ERESTARTSYS;
			goto out;
		}

		spin_unlock_irqrestore(&ctx->lock, flags);
		pr_devel("afu_read going to sleep...\n");
		schedule();
		pr_devel("afu_read woken up\n");
		spin_lock_irqsave(&ctx->lock, flags);
	}

	finish_wait(&ctx->wq, &wait);

	memset(&event, 0, sizeof(event));
	event.header.process_element = ctx->pe;
	event.header.size = sizeof(struct cxl_event_header);
	if (ctx->afu_driver_ops && atomic_read(&ctx->afu_driver_events)) {
		pr_devel("afu_read delivering AFU driver specific event\n");
		pl = ctx->afu_driver_ops->fetch_event(ctx);
		atomic_dec(&ctx->afu_driver_events);
		event.header.type = CXL_EVENT_AFU_DRIVER;
	} else if (ctx->pending_irq) {
		pr_devel("afu_read delivering AFU interrupt\n");
		event.header.size += sizeof(struct cxl_event_afu_interrupt);
		event.header.type = CXL_EVENT_AFU_INTERRUPT;
		event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1;
		clear_bit(event.irq.irq - 1, ctx->irq_bitmap);
		if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count))
			ctx->pending_irq = false;
	} else if (ctx->pending_fault) {
		pr_devel("afu_read delivering data storage fault\n");
		event.header.size += sizeof(struct cxl_event_data_storage);
		event.header.type = CXL_EVENT_DATA_STORAGE;
		event.fault.addr = ctx->fault_addr;
		event.fault.dsisr = ctx->fault_dsisr;
		ctx->pending_fault = false;
	} else if (ctx->pending_afu_err) {
		pr_devel("afu_read delivering afu error\n");
		event.header.size += sizeof(struct cxl_event_afu_error);
		event.header.type = CXL_EVENT_AFU_ERROR;
		event.afu_error.error = ctx->afu_err;
		ctx->pending_afu_err = false;
	} else if (ctx->status == CLOSED) {
		pr_devel("afu_read fatal error\n");
		spin_unlock_irqrestore(&ctx->lock, flags);
		return -EIO;
	} else
		WARN(1, "afu_read must be buggy\n");

	spin_unlock_irqrestore(&ctx->lock, flags);

	if (event.header.type == CXL_EVENT_AFU_DRIVER)
		return afu_driver_event_copy(ctx, buf, &event, pl);

	if (copy_to_user(buf, &event, event.header.size))
		return -EFAULT;
	return event.header.size;

out:
	finish_wait(&ctx->wq, &wait);
	spin_unlock_irqrestore(&ctx->lock, flags);
	return rc;
}
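/*
 * Illustrative userspace event loop (not part of the driver), pairing
 * afu_poll() and afu_read() above; assumes the uapi struct cxl_event from
 * include/uapi/misc/cxl.h and a buffer of at least CXL_READ_MIN_SIZE:
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *   char buf[4096];
 *   while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *       ssize_t n = read(fd, buf, sizeof(buf));
 *       struct cxl_event *ev = (struct cxl_event *)buf;
 *       // dispatch on ev->header.type: CXL_EVENT_AFU_INTERRUPT,
 *       // CXL_EVENT_DATA_STORAGE, CXL_EVENT_AFU_ERROR, ...
 *   }
 */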
/*
 * Note: if this is updated, we need to update api.c to patch the new ones in
 * too
 */
const struct file_operations afu_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};

static const struct file_operations afu_master_fops = {
	.owner		= THIS_MODULE,
	.open		= afu_master_open,
	.poll		= afu_poll,
	.read		= afu_read,
	.release	= afu_release,
	.unlocked_ioctl	= afu_ioctl,
	.compat_ioctl	= afu_compat_ioctl,
	.mmap		= afu_mmap,
};
static char *cxl_devnode(const struct device *dev, umode_t *mode)
{
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    CXL_DEVT_IS_CARD(dev->devt)) {
		/*
		 * These minor numbers will eventually be used to program the
		 * PSL and AFUs once we have dynamic reprogramming support
		 */
		return NULL;
	}
	return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}
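/*
 * With the "cxl/%s" devnode above, udev places the AFU nodes under
 * /dev/cxl, e.g. /dev/cxl/afu0.0d, /dev/cxl/afu0.0m and /dev/cxl/afu0.0s
 * for the dedicated, master and shared interfaces of AFU 0 on card 0.
 */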
static const struct class cxl_class = {
	.name		= "cxl",
	.devnode	= cxl_devnode,
};
static int cxl_add_chardev(struct cxl_afu *afu, dev_t devt, struct cdev *cdev,
			   struct device **chardev, char *postfix, char *desc,
			   const struct file_operations *fops)
{
	struct device *dev;
	int rc;

	cdev_init(cdev, fops);
	rc = cdev_add(cdev, devt, 1);
	if (rc) {
		dev_err(&afu->dev, "Unable to add %s chardev: %i\n", desc, rc);
		return rc;
	}

	dev = device_create(&cxl_class, &afu->dev, devt, afu,
			    "afu%i.%i%s", afu->adapter->adapter_num, afu->slice, postfix);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		dev_err(&afu->dev, "Unable to create %s chardev in sysfs: %i\n", desc, rc);
		goto err;
	}

	*chardev = dev;
	return 0;
err:
	cdev_del(cdev);
	return rc;
}
int cxl_chardev_d_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_D(afu), &afu->afu_cdev_d,
			       &afu->chardev_d, "d", "dedicated",
			       &afu_master_fops); /* Uses master fops */
}

int cxl_chardev_m_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_M(afu), &afu->afu_cdev_m,
			       &afu->chardev_m, "m", "master",
			       &afu_master_fops);
}

int cxl_chardev_s_afu_add(struct cxl_afu *afu)
{
	return cxl_add_chardev(afu, CXL_AFU_MKDEV_S(afu), &afu->afu_cdev_s,
			       &afu->chardev_s, "s", "shared",
			       &afu_fops);
}
void cxl_chardev_afu_remove(struct cxl_afu *afu)
{
	if (afu->chardev_d) {
		cdev_del(&afu->afu_cdev_d);
		device_unregister(afu->chardev_d);
		afu->chardev_d = NULL;
	}
	if (afu->chardev_m) {
		cdev_del(&afu->afu_cdev_m);
		device_unregister(afu->chardev_m);
		afu->chardev_m = NULL;
	}
	if (afu->chardev_s) {
		cdev_del(&afu->afu_cdev_s);
		device_unregister(afu->chardev_s);
		afu->chardev_s = NULL;
	}
}
int cxl_register_afu(struct cxl_afu *afu)
{
	afu->dev.class = &cxl_class;

	return device_register(&afu->dev);
}
int cxl_register_adapter(struct cxl *adapter)
{
	adapter->dev.class = &cxl_class;

	/*
	 * Future: When we support dynamically reprogramming the PSL & AFU we
	 * will expose the interface to do that via a chardev:
	 * adapter->dev.devt = CXL_CARD_MKDEV(adapter);
	 */

	return device_register(&adapter->dev);
}
dev_t cxl_get_dev(void)
{
	return cxl_dev;
}
int __init cxl_file_init(void)
{
	int rc;

	/*
	 * If these change we really need to update the API. Either change some
	 * flags or update the API version number CXL_API_VERSION.
	 */
	BUILD_BUG_ON(CXL_API_VERSION != 3);
	BUILD_BUG_ON(sizeof(struct cxl_ioctl_start_work) != 64);
	BUILD_BUG_ON(sizeof(struct cxl_event_header) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_interrupt) != 8);
	BUILD_BUG_ON(sizeof(struct cxl_event_data_storage) != 32);
	BUILD_BUG_ON(sizeof(struct cxl_event_afu_error) != 16);

	if ((rc = alloc_chrdev_region(&cxl_dev, 0, CXL_NUM_MINORS, "cxl"))) {
		pr_err("Unable to allocate CXL major number: %i\n", rc);
		return rc;
	}

	pr_devel("CXL device allocated, MAJOR %i\n", MAJOR(cxl_dev));

	rc = class_register(&cxl_class);
	if (rc) {
		pr_err("Unable to create CXL class\n");
		goto err;
	}

	return 0;

err:
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	return rc;
}
void cxl_file_exit(void)
{
	unregister_chrdev_region(cxl_dev, CXL_NUM_MINORS);
	class_unregister(&cxl_class);
}