2 * This file implements the DMA operations for NVLink devices. The NPU
3 * devices all point to the same iommu table as the parent PCI device.
5 * Copyright Alistair Popple, IBM Corporation 2015.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of version 2 of the GNU General Public
9 * License as published by the Free Software Foundation.
12 #include <linux/slab.h>
13 #include <linux/mmu_notifier.h>
14 #include <linux/mmu_context.h>
16 #include <linux/export.h>
17 #include <linux/pci.h>
18 #include <linux/memblock.h>
19 #include <linux/iommu.h>
20 #include <linux/debugfs.h>
22 #include <asm/debugfs.h>
24 #include <asm/powernv.h>
28 #include <asm/iommu.h>
29 #include <asm/pnv-pci.h>
30 #include <asm/msi_bitmap.h>
36 #define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
39 * spinlock to protect initialisation of an npu_context for a particular
42 static DEFINE_SPINLOCK(npu_context_lock);
45 * When an address shootdown range exceeds this threshold we invalidate the
46 * entire TLB on the GPU for the given PID rather than each specific address in
49 static uint64_t atsd_threshold = 2 * 1024 * 1024;
50 static struct dentry *atsd_threshold_dentry;
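/* Runtime-tunable via the atsd_threshold debugfs entry created in pnv_npu2_init(). */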
53 * Other types of TCE cache invalidation are not functional in the
56 static struct pci_dev *get_pci_dev(struct device_node *dn)
58 struct pci_dn *pdn = PCI_DN(dn);
61 pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
62 pdn->busno, pdn->devfn);
65 * pci_get_domain_bus_and_slot() increased the reference count of
66 * the PCI device, but callers don't actually need that as the PE
67 * already holds a reference to the device. Since callers aren't
68 * aware of the reference count change, call pci_dev_put() now to
77 /* Given a NPU device get the associated PCI device. */
78 struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
80 struct device_node *dn;
81 struct pci_dev *gpdev;
86 if (WARN_ON(!npdev->dev.of_node))
89 /* Get assoicated PCI device */
90 dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
94 gpdev = get_pci_dev(dn);
99 EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
101 /* Given the real PCI device get a linked NPU device. */
102 struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
104 struct device_node *dn;
105 struct pci_dev *npdev;
110 /* Not all PCI devices have device-tree nodes */
111 if (!gpdev->dev.of_node)
114 /* Get associated PCI device */
115 dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
119 npdev = get_pci_dev(dn);
124 EXPORT_SYMBOL(pnv_pci_get_npu_dev);
126 #define NPU_DMA_OP_UNSUPPORTED() \
127 dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
130 static void *dma_npu_alloc(struct device *dev, size_t size,
131 dma_addr_t *dma_handle, gfp_t flag,
134 NPU_DMA_OP_UNSUPPORTED();
138 static void dma_npu_free(struct device *dev, size_t size,
139 void *vaddr, dma_addr_t dma_handle,
142 NPU_DMA_OP_UNSUPPORTED();
145 static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
146 unsigned long offset, size_t size,
147 enum dma_data_direction direction,
150 NPU_DMA_OP_UNSUPPORTED();
154 static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
155 int nelems, enum dma_data_direction direction,
158 NPU_DMA_OP_UNSUPPORTED();
162 static int dma_npu_dma_supported(struct device *dev, u64 mask)
164 NPU_DMA_OP_UNSUPPORTED();
168 static u64 dma_npu_get_required_mask(struct device *dev)
170 NPU_DMA_OP_UNSUPPORTED();
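/*
 * NPU devices never DMA on their own behalf; they share the GPU's iommu
 * table (see the file header), so every op below is a stub that reports
 * an error once via NPU_DMA_OP_UNSUPPORTED().
 */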
174 static const struct dma_map_ops dma_npu_ops = {
175 .map_page = dma_npu_map_page,
176 .map_sg = dma_npu_map_sg,
177 .alloc = dma_npu_alloc,
178 .free = dma_npu_free,
179 .dma_supported = dma_npu_dma_supported,
180 .get_required_mask = dma_npu_get_required_mask,
184 * Returns the PE associated with the PCI device of the given
185 * NPU. Returns the linked pci device in *gpdev if gpdev != NULL.
187 static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
188 struct pci_dev **gpdev)
191 struct pci_controller *hose;
192 struct pci_dev *pdev;
193 struct pnv_ioda_pe *pe;
196 pdev = pnv_pci_get_gpu_dev(npe->pdev);
200 pdn = pci_get_pdn(pdev);
201 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
204 hose = pci_bus_to_host(pdev->bus);
205 phb = hose->private_data;
206 pe = &phb->ioda.pe_array[pdn->pe_number];
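/*
 * Point TCE window @num of the NPU PE at the given iommu table so the
 * NPU mirrors the GPU's DMA setup, then flush the NPU's TCE cache.
 */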
214 long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
215 struct iommu_table *tbl)
217 struct pnv_phb *phb = npe->phb;
219 const unsigned long size = tbl->it_indirect_levels ?
220 tbl->it_level_size : tbl->it_size;
221 const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
222 const __u64 win_size = tbl->it_size << tbl->it_page_shift;
224 pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
225 start_addr, start_addr + win_size - 1,
226 IOMMU_PAGE_SIZE(tbl));
228 rc = opal_pci_map_pe_dma_window(phb->opal_id,
231 tbl->it_indirect_levels + 1,
234 IOMMU_PAGE_SIZE(tbl));
236 pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
239 pnv_pci_ioda2_tce_invalidate_entire(phb, false);
241 /* Add the table to the list so its TCE cache will get invalidated */
242 pnv_pci_link_table_and_group(phb->hose->node, num,
243 tbl, &npe->table_group);
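/*
 * Clear TCE window @num on the NPU PE (all-zero window parameters), flush
 * the TCE cache and drop the table/group link set up above.
 */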
248 long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
250 struct pnv_phb *phb = npe->phb;
253 pe_info(npe, "Removing DMA window\n");
255 rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
257 0/* levels */, 0/* table address */,
258 0/* table size */, 0/* page size */);
260 pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
263 pnv_pci_ioda2_tce_invalidate_entire(phb, false);
265 pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
272 * Enables 32 bit DMA on NPU.
274 static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
276 struct pci_dev *gpdev;
277 struct pnv_ioda_pe *gpe;
281 * Find the associated PCI devices and get the dma window
282 * information from there.
284 if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
287 gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
291 rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
294 * We don't initialise npu_pe->tce32_table as we always use
295 * dma_npu_ops which are nops.
297 set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
301 * Enables bypass mode on the NPU. The NPU only supports one
302 * window per link, so bypass needs to be explicitly enabled or
303 * disabled. Unlike for a PHB3, bypass and non-bypass modes can't be
304 * active at the same time.
306 static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
308 struct pnv_phb *phb = npe->phb;
310 phys_addr_t top = memblock_end_of_DRAM();
312 if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
315 rc = pnv_npu_unset_window(npe, 0);
316 if (rc != OPAL_SUCCESS)
319 /* Enable the bypass window */
321 top = roundup_pow_of_two(top);
322 dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
324 rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
325 npe->pe_number, npe->pe_number,
326 0 /* bypass base */, top);
328 if (rc == OPAL_SUCCESS)
329 pnv_pci_ioda2_tce_invalidate_entire(phb, false);
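/*
 * Propagate the GPU's DMA configuration to every NPU device linked to it:
 * enable bypass where the GPU uses 64-bit bypass, otherwise fall back to
 * the 32-bit TCE window.
 */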
334 void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
339 struct pnv_ioda_pe *npe;
340 struct pci_dev *npdev;
343 npdev = pnv_pci_get_npu_dev(gpdev, i);
348 pdn = pci_get_pdn(npdev);
349 if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
352 phb = pci_bus_to_host(npdev->bus)->private_data;
354 /* We only do bypass if it's enabled on the linked device */
355 npe = &phb->ioda.pe_array[pdn->pe_number];
358 dev_info(&npdev->dev,
359 "Using 64-bit DMA iommu bypass\n");
360 pnv_npu_dma_set_bypass(npe);
362 dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
363 pnv_npu_dma_set_32(npe);
368 /* Switch ownership from platform code to external user (e.g. VFIO) */
369 void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
371 struct pnv_phb *phb = npe->phb;
375 * Note: NPU has just a single TVE in the hardware which means that
376 * while used by the kernel, it can have either 32bit window or
377 * DMA bypass but never both. So we deconfigure the 32bit window only
378 * if it was enabled at the moment of ownership change.
380 if (npe->table_group.tables[0]) {
381 pnv_npu_unset_window(npe, 0);
386 rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
387 npe->pe_number, npe->pe_number,
388 0 /* bypass base */, 0);
390 pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
393 pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
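/*
 * Attach every NPU device on this PHB that links to the same GPU to the
 * GPU's IOMMU group so they are managed together.
 */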
396 struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
398 struct pnv_phb *phb = npe->phb;
399 struct pci_bus *pbus = phb->hose->bus;
400 struct pci_dev *npdev, *gpdev = NULL, *gptmp;
401 struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
406 list_for_each_entry(npdev, &pbus->devices, bus_list) {
407 gptmp = pnv_pci_get_gpu_dev(npdev);
412 pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
413 iommu_group_add_device(gpe->table_group.group, &npdev->dev);
419 /* Maximum number of nvlinks per npu */
420 #define NV_MAX_LINKS 6
422 /* Maximum index of npu2 hosts in the system. Always < NV_MAX_NPUS */
423 static int max_npu2_index;
426 struct mm_struct *mm;
427 struct pci_dev *npdev[NV_MAX_NPUS][NV_MAX_LINKS];
428 struct mmu_notifier mn;
432 /* Callback to stop translation requests on a given GPU */
433 void (*release_cb)(struct npu_context *context, void *priv);
436 * Private pointer passed to the above callback for usage by
442 struct mmio_atsd_reg {
448 * Find a free MMIO ATSD register and mark it in use. Return -ENOSPC
449 * if none are available.
451 static int get_mmio_atsd_reg(struct npu *npu)
455 for (i = 0; i < npu->mmio_atsd_count; i++) {
456 if (!test_bit(i, &npu->mmio_atsd_usage))
457 if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
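/* Release an ATSD register previously claimed by get_mmio_atsd_reg(). */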
464 static void put_mmio_atsd_reg(struct npu *npu, int reg)
466 clear_bit_unlock(reg, &npu->mmio_atsd_usage);
469 /* MMIO ATSD register offsets */
470 #define XTS_ATSD_AVA 1
471 #define XTS_ATSD_STAT 2
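/*
 * Write the shootdown address first, then the launch word; the write to
 * the base (launch) offset starts the invalidation in hardware.
 */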
473 static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg,
474 unsigned long launch, unsigned long va)
476 struct npu *npu = mmio_atsd_reg->npu;
477 int reg = mmio_atsd_reg->reg;
479 __raw_writeq_be(va, npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA);
481 __raw_writeq_be(launch, npu->mmio_atsd_regs[reg]);
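/* Launch a PID-scoped ATSD on every NPU this context is active on. */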
484 static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
485 unsigned long pid, bool flush)
488 unsigned long launch;
490 for (i = 0; i <= max_npu2_index; i++) {
491 if (mmio_atsd_reg[i].reg < 0)
494 /* IS set to invalidate matching PID */
495 launch = PPC_BIT(12);
497 /* PRS set to process-scoped */
498 launch |= PPC_BIT(13);
502 mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
505 launch |= pid << PPC_BITLSHIFT(38);
508 launch |= !flush << PPC_BITLSHIFT(39);
510 /* Invalidating the entire process doesn't use a va */
511 mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0);
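/* As above, but invalidate only the single virtual address @va. */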
515 static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
516 unsigned long va, unsigned long pid, bool flush)
519 unsigned long launch;
521 for (i = 0; i <= max_npu2_index; i++) {
522 if (mmio_atsd_reg[i].reg < 0)
525 /* IS set to invalidate target VA */
528 /* PRS set to process scoped */
529 launch |= PPC_BIT(13);
533 mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
536 launch |= pid << PPC_BITLSHIFT(38);
539 launch |= !flush << PPC_BITLSHIFT(39);
541 mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va);
545 #define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
547 static void mmio_invalidate_wait(
548 struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
553 /* Wait for all invalidations to complete */
554 for (i = 0; i <= max_npu2_index; i++) {
555 if (mmio_atsd_reg[i].reg < 0)
558 /* Wait for completion */
559 npu = mmio_atsd_reg[i].npu;
560 reg = mmio_atsd_reg[i].reg;
561 while (__raw_readq(npu->mmio_atsd_regs[reg] + XTS_ATSD_STAT))
567 * Acquires all the address translation shootdown (ATSD) registers required to
568 * launch an ATSD on all links this npu_context is active on.
570 static void acquire_atsd_reg(struct npu_context *npu_context,
571 struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
575 struct pci_dev *npdev;
576 struct pnv_phb *nphb;
578 for (i = 0; i <= max_npu2_index; i++) {
579 mmio_atsd_reg[i].reg = -1;
580 for (j = 0; j < NV_MAX_LINKS; j++) {
582 * There are no ordering requirements with respect to
583 * the setup of struct npu_context, but to ensure
584 * consistent behaviour we need to ensure npdev[][] is
587 npdev = READ_ONCE(npu_context->npdev[i][j]);
591 nphb = pci_bus_to_host(npdev->bus)->private_data;
593 mmio_atsd_reg[i].npu = npu;
594 mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
595 while (mmio_atsd_reg[i].reg < 0) {
596 mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
605 * Release previously acquired ATSD registers. To avoid deadlocks the registers
606 * must be released in the same order they were acquired above in
609 static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
613 for (i = 0; i <= max_npu2_index; i++) {
615 * We can't rely on npu_context->npdev[][] being the same here
616 * as when acquire_atsd_reg() was called, hence we use the
617 * values stored in mmio_atsd_reg during the acquire phase
618 * rather than re-reading npdev[][].
620 if (mmio_atsd_reg[i].reg < 0)
623 put_mmio_atsd_reg(mmio_atsd_reg[i].npu, mmio_atsd_reg[i].reg);
628 * Invalidate either a single address or an entire PID depending on
631 static void mmio_invalidate(struct npu_context *npu_context, int va,
632 unsigned long address, bool flush)
634 struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
635 unsigned long pid = npu_context->mm->context.id;
637 if (npu_context->nmmu_flush)
639 * Unfortunately the nest mmu does not support flushing specific
640 * addresses so we have to flush the whole mm once before
641 * shooting down the GPU translation.
643 flush_all_mm(npu_context->mm);
646 * Loop over all the NPUs this process is active on and launch
649 acquire_atsd_reg(npu_context, mmio_atsd_reg);
651 mmio_invalidate_va(mmio_atsd_reg, address, pid, flush);
653 mmio_invalidate_pid(mmio_atsd_reg, pid, flush);
655 mmio_invalidate_wait(mmio_atsd_reg);
658 * The GPU requires two flush ATSDs to ensure all entries have
659 * been flushed. We use PID 0 as it will never be used for a
660 * process on the GPU.
662 mmio_invalidate_pid(mmio_atsd_reg, 0, true);
663 mmio_invalidate_wait(mmio_atsd_reg);
664 mmio_invalidate_pid(mmio_atsd_reg, 0, true);
665 mmio_invalidate_wait(mmio_atsd_reg);
667 release_atsd_reg(mmio_atsd_reg);
670 static void pnv_npu2_mn_release(struct mmu_notifier *mn,
671 struct mm_struct *mm)
673 struct npu_context *npu_context = mn_to_npu_context(mn);
675 /* Call into device driver to stop requests to the NMMU */
676 if (npu_context->release_cb)
677 npu_context->release_cb(npu_context, npu_context->priv);
680 * There should be no more translation requests for this PID, but we
681 * need to ensure any entries for it are removed from the TLB.
683 mmio_invalidate(npu_context, 0, 0, true);
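/* A PTE was changed in place: shoot down just the affected address. */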
686 static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
687 struct mm_struct *mm,
688 unsigned long address,
691 struct npu_context *npu_context = mn_to_npu_context(mn);
693 mmio_invalidate(npu_context, 1, address, true);
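/*
 * Invalidate a range of addresses, falling back to a full-PID shootdown
 * once the range exceeds atsd_threshold.
 */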
696 static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
697 struct mm_struct *mm,
698 unsigned long start, unsigned long end)
700 struct npu_context *npu_context = mn_to_npu_context(mn);
701 unsigned long address;
703 if (end - start > atsd_threshold) {
705 * Just invalidate the entire PID if the address range is too
708 mmio_invalidate(npu_context, 0, 0, true);
710 for (address = start; address < end; address += PAGE_SIZE)
711 mmio_invalidate(npu_context, 1, address, false);
713 /* Do the flush only on the final address == end */
714 mmio_invalidate(npu_context, 1, address, true);
718 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
719 .release = pnv_npu2_mn_release,
720 .change_pte = pnv_npu2_mn_change_pte,
721 .invalidate_range = pnv_npu2_mn_invalidate_range,
725 * Call into OPAL to setup the nmmu context for the current task in
726 * the NPU. This must be called to setup the context tables before the
727 * GPU issues ATRs. gpdev should be a pointer to the PCIe GPU device.
729 * A release callback should be registered to allow a device driver to
730 * be notified that it should not launch any new translation requests
731 * as the final TLB invalidate is about to occur.
733 * Returns an error if no contexts are currently available, or an
734 * npu_context which should be passed to pnv_npu2_handle_fault().
736 * mmap_sem must be held in write mode and this function must not be called from interrupt
739 struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
741 void (*cb)(struct npu_context *, void *),
746 struct device_node *nvlink_dn;
747 struct mm_struct *mm = current->mm;
748 struct pnv_phb *nphb;
750 struct npu_context *npu_context;
753 * At present we don't support GPUs connected to multiple NPUs and I'm
754 * not sure the hardware does either.
756 struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
758 if (!firmware_has_feature(FW_FEATURE_OPAL))
759 return ERR_PTR(-ENODEV);
762 /* No nvlink associated with this GPU device */
763 return ERR_PTR(-ENODEV);
765 nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
766 if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
768 return ERR_PTR(-ENODEV);
770 if (!mm || mm->context.id == 0) {
772 * Kernel thread contexts are not supported and context id 0 is
773 * reserved on the GPU.
775 return ERR_PTR(-EINVAL);
778 nphb = pci_bus_to_host(npdev->bus)->private_data;
782 * Setup the NPU context table for a particular GPU. These need to be
783 * per-GPU as we need the tables to filter ATSDs when there are no
784 * active contexts on a particular GPU. It is safe for these to be
785 * called concurrently with destroy as the OPAL call takes appropriate
786 * locks and refcounts on init/destroy.
788 rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
789 PCI_DEVID(gpdev->bus->number, gpdev->devfn));
791 return ERR_PTR(-ENOSPC);
794 * We store the npu pci device so we can more easily get at the
797 spin_lock(&npu_context_lock);
798 npu_context = mm->context.npu_context;
800 if (npu_context->release_cb != cb ||
801 npu_context->priv != priv) {
802 spin_unlock(&npu_context_lock);
803 opal_npu_destroy_context(nphb->opal_id, mm->context.id,
804 PCI_DEVID(gpdev->bus->number,
806 return ERR_PTR(-EINVAL);
809 WARN_ON(!kref_get_unless_zero(&npu_context->kref));
811 spin_unlock(&npu_context_lock);
815 * We can set up these fields without holding the
816 * npu_context_lock as the npu_context hasn't been returned to
817 * the caller meaning it can't be destroyed. Parallel allocation
818 * is protected against by mmap_sem.
821 npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
823 kref_init(&npu_context->kref);
824 npu_context->mm = mm;
825 npu_context->mn.ops = &nv_nmmu_notifier_ops;
826 rc = __mmu_notifier_register(&npu_context->mn, mm);
831 opal_npu_destroy_context(nphb->opal_id, mm->context.id,
832 PCI_DEVID(gpdev->bus->number,
837 mm->context.npu_context = npu_context;
840 npu_context->release_cb = cb;
841 npu_context->priv = priv;
844 * npdev is a pci_dev pointer set up by the PCI code. We assign it to
845 * npdev[][] to indicate to the mmu notifiers that an invalidation
846 * should also be sent over this nvlink. The notifiers don't use any
847 * other fields in npu_context, so we just need to ensure that when they
848 * dereference npu_context->npdev[][] it is either a valid pointer or
851 WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
853 if (!nphb->npu.nmmu_flush) {
855 * If we're not explicitly flushing ourselves we need to mark
856 * the thread for global flushes
858 npu_context->nmmu_flush = false;
859 mm_context_add_copro(mm);
861 npu_context->nmmu_flush = true;
865 EXPORT_SYMBOL(pnv_npu2_init_context);
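/*
 * Illustrative driver-side usage (a sketch only; not part of this file).
 * Names such as my_release_cb, my_priv, ea_buf, fault_flags and status_buf
 * are hypothetical, and error handling is omitted:
 *
 *	struct npu_context *ctx;
 *
 *	down_write(&current->mm->mmap_sem);
 *	ctx = pnv_npu2_init_context(gpdev, flags, my_release_cb, my_priv);
 *	up_write(&current->mm->mmap_sem);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	(with mmap_sem held, resolve GPU ATS faults as they arrive)
 *	pnv_npu2_handle_fault(ctx, ea_buf, fault_flags, status_buf, count);
 *
 *	(on teardown)
 *	pnv_npu2_destroy_context(ctx, gpdev);
 */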
867 static void pnv_npu2_release_context(struct kref *kref)
869 struct npu_context *npu_context =
870 container_of(kref, struct npu_context, kref);
872 if (!npu_context->nmmu_flush)
873 mm_context_remove_copro(npu_context->mm);
875 npu_context->mm->context.npu_context = NULL;
879 * Destroy a context on the given GPU. May free the npu_context if it is no
880 * longer active on any GPUs. Must not be called from interrupt context.
882 void pnv_npu2_destroy_context(struct npu_context *npu_context,
883 struct pci_dev *gpdev)
886 struct pnv_phb *nphb;
888 struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
889 struct device_node *nvlink_dn;
895 if (!firmware_has_feature(FW_FEATURE_OPAL))
898 nphb = pci_bus_to_host(npdev->bus)->private_data;
900 nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
901 if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
904 WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
905 opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
906 PCI_DEVID(gpdev->bus->number, gpdev->devfn));
907 spin_lock(&npu_context_lock);
908 removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
909 spin_unlock(&npu_context_lock);
912 * We need to do this outside of pnv_npu2_release_context so that it is
913 * outside the spinlock as mmu_notifier_unregister() uses SRCU.
916 mmu_notifier_unregister(&npu_context->mn,
923 EXPORT_SYMBOL(pnv_npu2_destroy_context);
926 * Assumes mmap_sem is held for the context's associated mm.
928 int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
929 unsigned long *flags, unsigned long *status, int count)
931 u64 rc = 0, result = 0;
933 struct page *page[1];
935 /* mmap_sem should be held so the mm_struct must be present */
936 struct mm_struct *mm = context->mm;
938 if (!firmware_has_feature(FW_FEATURE_OPAL))
941 WARN_ON(!rwsem_is_locked(&mm->mmap_sem));
943 for (i = 0; i < count; i++) {
944 is_write = flags[i] & NPU2_WRITE;
945 rc = get_user_pages_remote(NULL, mm, ea[i], 1,
946 is_write ? FOLL_WRITE : 0,
950 * To support virtualised environments we will have to do an
951 * access to the page to ensure it gets faulted into the
952 * hypervisor. For the moment virtualisation is not supported in
953 * other areas so leave the access out.
967 EXPORT_SYMBOL(pnv_npu2_handle_fault);
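/*
 * Per-PHB NPU setup: create the atsd_threshold debugfs entry (once), map
 * each GPU child device to the LPAR via OPAL, and ioremap the MMIO ATSD
 * registers listed in the "ibm,mmio-atsd" device-tree property.
 */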
969 int pnv_npu2_init(struct pnv_phb *phb)
973 struct device_node *dn;
974 struct pci_dev *gpdev;
975 static int npu_index;
978 if (!atsd_threshold_dentry) {
979 atsd_threshold_dentry = debugfs_create_x64("atsd_threshold",
980 0600, powerpc_debugfs_root, &atsd_threshold);
983 phb->npu.nmmu_flush =
984 of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
985 for_each_child_of_node(phb->hose->dn, dn) {
986 gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
988 rc = opal_npu_map_lpar(phb->opal_id,
989 PCI_DEVID(gpdev->bus->number, gpdev->devfn),
993 "Error %lld mapping device to LPAR\n",
998 for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
1000 phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
1002 pr_info("NPU%lld: Found %d MMIO ATSD registers\n", phb->opal_id, i);
1003 phb->npu.mmio_atsd_count = i;
1004 phb->npu.mmio_atsd_usage = 0;
1006 if (WARN_ON(npu_index >= NV_MAX_NPUS))
1008 max_npu2_index = npu_index;
1009 phb->npu.index = npu_index;