/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"
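
/*
 * Overview: a TCE (translation control entry) maps one page of a PCI
 * DMA window to a page of host memory.  On bare-metal (non-LPAR)
 * systems the kernel owns the TCE table and writes entries directly;
 * under PHYP the hypervisor owns it, and entries are updated through
 * the H_PUT_TCE, H_PUT_TCE_INDIRECT and H_STUFF_TCE hcalls wrapped by
 * the plpar_tce_*() helpers used below.
 */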
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
	struct iommu_table_group *table_group;
	struct iommu_table *tbl;
	struct iommu_table_group_link *tgl;

	table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
				   node);
	if (!table_group)
		return NULL;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
	if (!tbl)
		goto free_group;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			   node);
	if (!tgl)
		goto free_table;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);
	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[0] = tbl;

	return table_group;

free_table:
	kfree(tbl);
free_group:
	kfree(table_group);
	return NULL;
}
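
/*
 * Tear down a group allocated above: unlink the table group link from
 * the table's RCU list and drop the table reference; the table itself
 * is freed once its kref falls to zero.
 */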
static void iommu_pseries_free_group(struct iommu_table_group *table_group,
				     const char *node_name)
{
	struct iommu_table *tbl;
#ifdef CONFIG_IOMMU_API
	struct iommu_table_group_link *tgl;
#endif

	if (!table_group)
		return;

	tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
	tgl = list_first_entry_or_null(&tbl->it_group_list,
				       struct iommu_table_group_link, next);

	WARN_ON_ONCE(!tgl);
	if (tgl) {
		list_del_rcu(&tgl->next);
		kfree(tgl);
	}
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
#endif
	iommu_tce_table_put(tbl);

	kfree(table_group);
}
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     unsigned long attrs)
{
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}
static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}
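
/*
 * LPAR variants: the TCE table lives on the hypervisor side, so every
 * update is an hcall.  tce_build_pSeriesLP() issues one H_PUT_TCE per
 * entry, while tce_buildmulti_pSeriesLP() batches up to a page of TCEs
 * into a single H_PUT_TCE_INDIRECT when firmware advertises MULTITCE.
 */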
static void tce_free_pSeriesLP(unsigned long liobn, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> tceshift;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(liobn, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)liobn);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
		}

		tcenum++;
		rpn++;
	}
	return ret;
}
static DEFINE_PER_CPU(__be64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
		return tce_build_pSeriesLP(tbl->it_index, tcenum,
					   tbl->it_page_shift, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl->it_index, tcenum,
					tbl->it_page_shift,
					npages, uaddr, direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
	}
	return ret;
}
static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)liobn);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		}

		tcenum++;
	}
}
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE))
		return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
	}
}
static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
	}

	return tce_ret;
}
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};
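
/*
 * Illustrative encoding (made-up values): a 2^40-byte window of 16MB
 * TCE pages at bus address 0x8000000000000000 on LIOBN 0x80000001 is
 * stored as the five cells <0x80000001 0x80000000 0x00000000 24 40>.
 */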
struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};
/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};
static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					  unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset,
				     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
		unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
		unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
				be64_to_cpu(maprange->dma_base);
		unsigned long tcenum = dmastart >> tceshift;
		unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
		void *uaddr = __va(start_pfn << PAGE_SHIFT);

		return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
				tcenum, tceshift, npages, (unsigned long) uaddr,
				DMA_BIDIRECTIONAL, 0);
	}

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
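
/*
 * iommu_table_setparms: fill in a table for a non-LPAR PHB from the
 * linux,tce-base/linux,tce-size properties, carving the PHB's 32-bit
 * DMA space into per-slot windows as dma_window_base_cur advances.
 */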
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
				"missing tce entries !\n", dn);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}
/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);
	tbl->it_ops = &iommu_table_pseries_ops;
	iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(pci->table_group, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}
#endif

struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
	.exchange = tce_exchange_pseries,
#endif
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
		 dn);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug("  no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %pOF, iommu_table: 0x%p\n",
		 pdn, ppci->table_group);

	if (!ppci->table_group) {
		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
		tbl = ppci->table_group->tables[0];
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
				ppci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(ppci->table_group,
				pci_domain_nr(bus), 0);
		pr_debug("  created table: %p\n", ppci->table_group);
	}
}
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
		tbl = PCI_DN(dn)->table_group->tables[0];
		iommu_table_setparms(phb, dn, tbl);
		tbl->it_ops = &iommu_table_pseries_ops;
		iommu_init_table(tbl, phb->node);
		iommu_register_group(PCI_DN(dn)->table_group,
				pci_domain_nr(phb->bus), 0);
		set_iommu_table_base(&dev->dev, tbl);
		iommu_add_device(&dev->dev);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn)) {
		set_iommu_table_base(&dev->dev,
				PCI_DN(dn)->table_group->tables[0]);
		iommu_add_device(&dev->dev);
	} else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}
static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);
static void remove_ddw(struct device_node *np, bool remove_prop)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	u32 ddw_avail[3];
	u64 liobn;
	int ret = 0;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);

	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (ret || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warn("%pOF failed to clear tces in window.\n",
			np);
	else
		pr_debug("%pOF successfully cleared tces in window.\n",
			 np);

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warn("%pOF: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);
	else
		pr_debug("%pOF: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);

delprop:
	if (remove_prop)
		ret = of_remove_property(np, win64);
	if (ret)
		pr_warn("%pOF: failed to remove direct window property: %d\n",
			np, ret);
}
static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}
static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn, true);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
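
/*
 * The DDW RTAS calls identify the PE by its config address (bus and
 * devfn) plus the PHB's BUID split into high/low words; query results
 * land directly in the ddw_query_response layout defined above.
 */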
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);
	return ret;
}
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
		      int window_shift)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
				cfg_addr, BUID_HI(buid), BUID_LO(buid),
				page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);
/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	u32 ddw_avail[3];
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (fpdn->pdn == pdn)
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition.  Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_failed;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_failed;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_failed;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_failed;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			"couldn't allocate property for 64bit dma window\n");
		goto out_failed;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			"couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
			create.addr_lo);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
		create.liobn, dn);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n",
			 dn, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %pOF: %d",
			pdn, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = be64_to_cpu(ddwprop->dma_base);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn, true);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_failed:

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}
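
/*
 * Per-device LPAR setup: the ibm,dma-window property (or an already
 * initialized table group) may live on the device node itself or on
 * any parent bridge node, hence the upward walk below.
 */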
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %pOF\n", dn);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%pOF\n",
		       pci_name(dev), dn);
		return;
	}
	pr_debug("  parent is %pOF\n", pdn);

	pci = PCI_DN(pdn);
	if (!pci->table_group) {
		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
		tbl = pci->table_group->tables[0];
		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
				pci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(pci->table_group,
				pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->table_group);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->table_group);
	}

	set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
	iommu_add_device(&dev->dev);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %pOF\n", dn);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_nommu_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}
static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!dev->dma_mask)
		return 0;

	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->table_group;
				dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is an ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}
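
/*
 * Direct windows map all of system RAM linearly, so hot-added memory
 * must be entered into every active window, and the entries must be
 * torn down again when onlining is cancelled or memory goes offline.
 */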
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
			/* XXX log error */
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		remove_ddw(np, false);
		if (pci && pci->table_group)
			iommu_pseries_free_group(pci->table_group,
					np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};
/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}
static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);
machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);