/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ppc-pci.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group_link *tgl = NULL;

	table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
				   node);
	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			   node);

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);
	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[0] = tbl;

	return table_group;
}
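
/*
 * Each PE gets one iommu_table_group whose tables[0] is its (initially
 * single) TCE table; the iommu_table_group_link set up above lets code
 * holding only the iommu_table walk back to the owning group through
 * tbl->it_group_list.
 */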
static void iommu_pseries_free_group(struct iommu_table_group *table_group,
		const char *node_name)
{
	struct iommu_table *tbl;
#ifdef CONFIG_IOMMU_API
	struct iommu_table_group_link *tgl;
#endif

	if (!table_group)
		return;

	tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
	tgl = list_first_entry_or_null(&tbl->it_group_list,
			struct iommu_table_group_link, next);
	if (tgl) {
		list_del_rcu(&tgl->next);
		kfree(tgl);
	}

	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
#endif
	iommu_tce_table_put(tbl);

	kfree(table_group);
}
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			      long npages, unsigned long uaddr,
			      enum dma_data_direction direction,
			      unsigned long attrs)
{
	u64 proto_tce, rpn;
	__be64 *tcep, *tces;

	proto_tce = TCE_PCI_READ; // Read allowed
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tces = tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep, *tces;

	tces = tcep = ((__be64 *)tbl->it_base) + index;
	while (npages--)
		*(tcep++) = 0;
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;
	return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(unsigned long liobn, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> tceshift;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(liobn, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)liobn);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
		}

		tcenum++;
		rpn++;
	}
	return ret;
}
static DEFINE_PER_CPU(__be64 *, tce_page);
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				     long npages, unsigned long uaddr,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
		return tce_build_pSeriesLP(tbl->it_index, tcenum,
					   tbl->it_page_shift, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl->it_index, tcenum,
					tbl->it_page_shift,
					npages, uaddr, direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
	}
	return ret;
}
static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex   = 0x%llx\n", (u64)liobn);
			printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE))
		return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc      = %lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages  = 0x%llx\n", (u64)npages);
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex   = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum  = 0x%llx\n", (u64)tcenum);
	}

	return tce_ret;
}
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u32 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset, 0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
		unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
		unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
				be64_to_cpu(maprange->dma_base);
		unsigned long tcenum = dmastart >> tceshift;
		unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
		void *uaddr = __va(start_pfn << PAGE_SHIFT);

		return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
				tcenum, tceshift, npages, (unsigned long) uaddr,
				DMA_BIDIRECTIONAL, 0);
	}

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
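
/*
 * tce_setrange_multi_pSeriesLP_walk() only exists to match the callback
 * signature expected by walk_system_ram_range(), which is how
 * enable_ddw() below maps every RAM range into a newly created direct
 * window.
 */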
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
				"missing tce entries !\n", dn);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}
/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}
struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);
	tbl->it_ops = &iommu_table_pseries_ops;
	iommu_init_table(tbl, pci->phb->node);
	iommu_register_group(pci->table_group, pci_domain_nr(bus), 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}
#endif

struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
	.exchange = tce_exchange_pseries,
#endif
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};
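
/*
 * The LPAR ops above go through the hypervisor via the plpar_tce_*
 * wrappers (H_PUT_TCE, H_PUT_TCE_INDIRECT, H_STUFF_TCE, H_GET_TCE),
 * whereas iommu_table_pseries_ops writes TCEs directly through
 * tbl->it_base for the non-LPAR (SMP) case.
 */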
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
		 dn);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug(" no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
		 pdn, ppci->table_group);

	if (!ppci->table_group) {
		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
		tbl = ppci->table_group->tables[0];
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
				ppci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, ppci->phb->node);
		iommu_register_group(ppci->table_group,
				pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->table_group);
	}
}
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
		tbl = PCI_DN(dn)->table_group->tables[0];
		iommu_table_setparms(phb, dn, tbl);
		tbl->it_ops = &iommu_table_pseries_ops;
		iommu_init_table(tbl, phb->node);
		iommu_register_group(PCI_DN(dn)->table_group,
				pci_domain_nr(phb->bus), 0);
		set_iommu_table_base(&dev->dev, tbl);
		iommu_add_device(&dev->dev);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn)) {
		set_iommu_table_base(&dev->dev,
				PCI_DN(dn)->table_group->tables[0]);
		iommu_add_device(&dev->dev);
	} else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}
static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);
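
/*
 * Booting with "disable_ddw" on the kernel command line keeps every
 * device on the default 32-bit IOMMU window; the DMA mask handlers
 * below check this flag before attempting to create a direct window.
 */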
static void remove_ddw(struct device_node *np, bool remove_prop)
{
	struct dynamic_dma_window_prop *dwp;
	struct property *win64;
	u32 ddw_avail[3];
	u64 liobn;
	int ret = 0;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);

	win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win64)
		return;

	if (ret || win64->length < sizeof(*dwp))
		goto delprop;

	dwp = win64->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warning("%pOF failed to clear tces in window.\n",
			   np);
	else
		pr_debug("%pOF successfully cleared tces in window.\n",
			 np);

	ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
	if (ret)
		pr_warning("%pOF: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);
	else
		pr_debug("%pOF: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[2], liobn);

delprop:
	if (remove_prop)
		ret = of_remove_property(np, win64);
	if (ret)
		pr_warning("%pOF: failed to remove direct window property: %d\n",
			   np, ret);
}
static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}
static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn, true);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_query_response *query)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
		" returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
		BUID_LO(buid), ret);

	return ret;
}
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
			struct ddw_create_response *create, int page_shift,
			int window_shift)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
				cfg_addr, BUID_HI(buid), BUID_LO(buid),
				page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
		cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
		window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);

	return ret;
}
struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);
/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma_window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns the dma offset for use by dma_set_mask
 */
static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len, ret;
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 dma_addr, max_addr;
	struct device_node *dn;
	u32 ddw_avail[3];
	struct direct_window *window;
	struct property *win64;
	struct dynamic_dma_window_prop *ddwprop;
	struct failed_ddw_pdn *fpdn;

	mutex_lock(&direct_window_init_mutex);

	dma_addr = find_existing_ddw(pdn);
	if (dma_addr != 0)
		goto out_unlock;

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (fpdn->pdn == pdn)
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], 3);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition. Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query);
	if (ret != 0)
		goto out_failed;

	if (query.windows_available == 0) {
		/*
		 * no additional windows are available for this device.
		 * We might be able to reallocate the existing window,
		 * trading in for a larger page size.
		 */
		dev_dbg(&dev->dev, "no free dynamic windows");
		goto out_failed;
	}
	if (query.page_size & 4) {
		page_shift = 24; /* 16MB */
	} else if (query.page_size & 2) {
		page_shift = 16; /* 64kB */
	} else if (query.page_size & 1) {
		page_shift = 12; /* 4kB */
	} else {
		dev_dbg(&dev->dev, "no supported direct page size in mask %x",
			query.page_size);
		goto out_failed;
	}
	/* verify the window * number of ptes will map the partition */
	/* check largest block * page size > max memory hotplug addr */
	max_addr = memory_hotplug_max();
	if (query.largest_available_block < (max_addr >> page_shift)) {
		dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
			"%llu-sized pages\n", max_addr, query.largest_available_block,
			1ULL << page_shift);
		goto out_failed;
	}
	len = order_base_2(max_addr);
	win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
	if (!win64) {
		dev_info(&dev->dev,
			"couldn't allocate property for 64bit dma window\n");
		goto out_failed;
	}
	win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
	win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		dev_info(&dev->dev,
			"couldn't allocate property name and value\n");
		goto out_free_prop;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_free_prop;

	ddwprop->liobn = cpu_to_be32(create.liobn);
	ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
			create.addr_lo);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(len);

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
		create.liobn, dn);

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		goto out_clear_window;

	ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
			win64->value, tce_setrange_multi_pSeriesLP_walk);
	if (ret) {
		dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n",
			 dn, ret);
		goto out_free_window;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add dma window property for %pOF: %d",
			pdn, ret);
		goto out_free_window;
	}

	window->device = pdn;
	window->prop = ddwprop;
	spin_lock(&direct_window_list_lock);
	list_add(&window->list, &direct_window_list);
	spin_unlock(&direct_window_list_lock);

	dma_addr = be64_to_cpu(ddwprop->dma_base);
	goto out_unlock;

out_free_window:
	kfree(window);

out_clear_window:
	remove_ddw(pdn, true);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_failed:
	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&direct_window_init_mutex);
	return dma_addr;
}
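
/*
 * Summary of enable_ddw(): query firmware for a free window
 * (ibm,query-pe-dma-window), pick the largest supported TCE page size,
 * create the window (ibm,create-pe-dma-window), TCE-map all of system
 * RAM into it, then publish the result both in the device tree
 * (DIRECT64_PROPNAME) and on direct_window_list so later functions of
 * the same PE reuse it. Any failure is remembered on
 * failed_ddw_pdn_list to avoid repeated attempts.
 */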
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug(" node is %pOF\n", dn);

	for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
	     pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window)
			break;
	}

	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%pOF\n",
		       pci_name(dev), dn);
		return;
	}
	pr_debug(" parent is %pOF\n", pdn);

	pci = PCI_DN(pdn);
	if (!pci->table_group) {
		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
		tbl = pci->table_group->tables[0];
		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
				pci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, pci->phb->node);
		iommu_register_group(pci->table_group,
				pci_domain_nr(pci->phb->bus), 0);
		pr_debug(" created table: %p\n", pci->table_group);
	} else {
		pr_debug(" found DMA window, table: %p\n", pci->table_group);
	}

	set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
	iommu_add_device(&dev->dev);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
{
	bool ddw_enabled = false;
	struct device_node *pdn, *dn;
	struct pci_dev *pdev;
	const __be32 *dma_window = NULL;
	u64 dma_offset;

	if (!dev->dma_mask)
		return -EIO;

	if (!dev_is_pci(dev))
		goto check_mask;

	pdev = to_pci_dev(dev);

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) {
		dn = pci_device_to_OF_node(pdev);
		dev_dbg(dev, "node is %pOF\n", dn);

		/*
		 * the device tree might contain the dma-window properties
		 * per-device and not necessarily for the bus. So we need to
		 * search upwards in the tree until we either hit a dma-window
		 * property, OR find a parent with a table already allocated.
		 */
		for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
				pdn = pdn->parent) {
			dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
			if (dma_window)
				break;
		}
		if (pdn && PCI_DN(pdn)) {
			dma_offset = enable_ddw(pdev, pdn);
			if (dma_offset != 0) {
				dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
				set_dma_offset(dev, dma_offset);
				set_dma_ops(dev, &dma_direct_ops);
				ddw_enabled = true;
			}
		}
	}

	/* fall back on iommu ops */
	if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) {
		dev_info(dev, "Restoring 32-bit DMA via iommu\n");
		set_dma_ops(dev, &dma_iommu_ops);
	}

check_mask:
	if (!dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;
	return 0;
}
static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
{
	if (!disable_ddw && dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);
		struct device_node *dn;

		dn = pci_device_to_OF_node(pdev);

		/* search upwards for ibm,dma-window */
		for (; dn && PCI_DN(dn) && !PCI_DN(dn)->table_group;
				dn = dn->parent)
			if (of_get_property(dn, "ibm,dma-window", NULL))
				break;
		/* if there is a ibm,ddw-applicable property require 64 bits */
		if (dn && PCI_DN(dn) &&
				of_get_property(dn, "ibm,ddw-applicable", NULL))
			return DMA_BIT_MASK(64);
	}

	return dma_iommu_ops.get_required_mask(dev);
}
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct direct_window *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE:
		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct direct_window *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		remove_ddw(np, false);
		if (pci && pci->table_group)
			iommu_pseries_free_group(pci->table_group,
					np->full_name);

		spin_lock(&direct_window_list_lock);
		list_for_each_entry(window, &direct_window_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&direct_window_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};
/* These are called very early. */
void iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
		ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
	} else {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}
static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
	}
	return 1;
}

__setup("multitce=", disable_multitce);

machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);