// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/alpha/kernel/pci_iommu.c
 */
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/dma-map-ops.h>
#include <linux/iommu-helper.h>

#include <asm/hwrpb.h>
#define DEBUG_ALLOC 0
#if DEBUG_ALLOC > 0
# define DBGA(args...)		printk(KERN_DEBUG args)
#else
# define DBGA(args...)
#endif
#if DEBUG_ALLOC > 1
# define DBGA2(args...)		printk(KERN_DEBUG args)
#else
# define DBGA2(args...)
#endif

#define DEBUG_NODIRECT 0

#define ISA_DMA_MASK		0x00ffffff
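/* Build an IOMMU page-table entry from a physical address.  The page
   frame number is stored shifted left by one so that bit 0 is free to
   serve as the valid bit: (paddr >> (PAGE_SHIFT-1)) | 1 == (pfn << 1) | 1.  */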
static inline unsigned long
mk_iommu_pte(unsigned long paddr)
{
	return (paddr >> (PAGE_SHIFT-1)) | 1;
}
/* Return the minimum of MAX or the first power of two larger
   than main memory.  */

static unsigned long
size_for_memory(unsigned long max)
{
	unsigned long mem = max_low_pfn << PAGE_SHIFT;
	if (mem < max)
		max = roundup_pow_of_two(mem);
	return max;
}
struct pci_iommu_arena * __init
iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
		     unsigned long window_size, unsigned long align)
{
	unsigned long mem_size;
	struct pci_iommu_arena *arena;

	mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));

	/* Note that the TLB lookup logic uses bitwise concatenation,
	   not addition, so the required arena alignment is based on
	   the size of the window.  Retain the align parameter so that
	   particular systems can over-align the arena.  */
	if (align < mem_size)
		align = mem_size;

	arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
	if (!arena)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(*arena));
	arena->ptes = memblock_alloc(mem_size, align);
	if (!arena->ptes)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, mem_size, align);

	spin_lock_init(&arena->lock);
	arena->hose = hose;
	arena->dma_base = base;
	arena->size = window_size;
	arena->next_entry = 0;

	/* Align allocations to a multiple of a page size.  Not needed
	   unless there are chip bugs.  */
	arena->align_entry = 1;

	return arena;
}
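/* Each arena describes one DMA window: "size" bytes of PCI bus space
   starting at "dma_base", backed by one page-table entry per CPU page.
   Allocation state lives in the ptes array itself -- a zero pte is free,
   anything else is in use.  */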
struct pci_iommu_arena * __init
iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
		unsigned long window_size, unsigned long align)
{
	return iommu_arena_new_node(0, hose, base, window_size, align);
}
/* Must be called with the arena lock held */
iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
	unsigned long boundary_size;

	base = arena->dma_base >> PAGE_SHIFT;
	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);

	/* Search forward for the first mask-aligned sequence of N free ptes */
	nent = arena->size >> PAGE_SHIFT;
	p = ALIGN(arena->next_entry, mask + 1);

	while (i < n && p+i < nent) {
		if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
			p = ALIGN(p + 1, mask + 1);

		p = ALIGN(p + i + 1, mask + 1), i = 0;

			/*
			 * Reached the end.  Flush the TLB and restart
			 * the search from the beginning.
			 */
			alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

	/* Success. It's the responsibility of the caller to mark them
	   in use before releasing the lock */
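/* The search above is first-fit: scan forward from next_entry for a run
   of n free ptes whose start is aligned to (mask + 1), skipping starts
   that would let the run cross the device's DMA segment boundary
   (iommu_is_span_boundary).  If the end of the arena is reached, the
   chipset TLB is flushed and the scan restarts once from entry 0 before
   giving up.  */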
static long
iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
		  unsigned int align)
{
	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes */
	mask = max(align, arena->align_entry) - 1;
	p = iommu_arena_find_pages(dev, arena, n, mask);
	if (p < 0) {
		spin_unlock_irqrestore(&arena->lock, flags);
		return -1;
	}

	/* Success.  Mark them all in use, ie not zero and invalid
	   for the iommu tlb that could load them from under us.
	   The chip specific bits will fill this in with something
	   kosher when we return.  */
	for (i = 0; i < n; ++i)
		ptes[p+i] = IOMMU_INVALID_PTE;

	arena->next_entry = p + n;
	spin_unlock_irqrestore(&arena->lock, flags);

	return p;
}
static void
iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
{
	p = arena->ptes + ofs;
	for (i = 0; i < n; ++i)
		p[i] = 0;
}
/*
 * True if the machine supports DAC addressing, and DEV can
 * make use of it given MASK.
 */
static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
{
	dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
	int ok = 1;

	/* If this is not set, the machine doesn't support DAC at all.  */
	if (dac_offset == 0)
		ok = 0;

	/* The device has to be able to address our DAC bit.  */
	if ((dac_offset & dev->dma_mask) != dac_offset)
		ok = 0;

	/* If both conditions above are met, we are fine. */
	DBGA("pci_dac_dma_supported %s from %ps\n",
	     ok ? "yes" : "no", __builtin_return_address(0));

	return ok;
}
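/* DAC is the PCI dual-address cycle, i.e. 64-bit addressing on the bus.
   When alpha_mv.pci_dac_offset is nonzero, the chipset can translate
   (physical address + offset) directly, so no SG ptes are needed for
   devices whose dma_mask covers the offset.  */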
/* Map a single buffer of the indicated size for PCI DMA in streaming
   mode.  The 32-bit PCI bus mastering address to use is returned.
   Once the device is given the dma address, the device owns this memory
   until either pci_unmap_single or pci_dma_sync_single is performed.  */

pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	struct pci_iommu_arena *arena;
	long npages, dma_ofs, i;
	unsigned int align = 0;
	struct device *dev = pdev ? &pdev->dev : NULL;

	paddr = __pa(cpu_addr);

	/* First check to see if we can use the direct map window. */
	if (paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		ret = paddr + __direct_map_base;

		DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

	/* Next, use DAC if selected earlier. */
		ret = paddr + alpha_mv.pci_dac_offset;

		DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
		      cpu_addr, size, ret, __builtin_return_address(0));

	/* If the machine doesn't define a pci_tbi routine, we have to
	   assume it doesn't support sg mapping, and, since we tried to
	   use direct_map above, it now must be considered an error. */
	if (! alpha_mv.mv_pci_tbi) {
		printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
		return DMA_MAPPING_ERROR;

	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	npages = iommu_num_pages(paddr, size, PAGE_SIZE);

	/* Force allocation to 64KB boundary for ISA bridges. */
	if (pdev && pdev == isa_bridge)
	dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
		printk(KERN_WARNING "pci_map_single failed: "
		       "could not allocate dma page tables\n");
		return DMA_MAPPING_ERROR;

	for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
		arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);

	ret = arena->dma_base + dma_ofs * PAGE_SIZE;
	ret += (unsigned long)cpu_addr & ~PAGE_MASK;

	DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
	      cpu_addr, size, npages, ret, __builtin_return_address(0));
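/* Note the fallback order above: the direct-map window is tried first
   (no ptes and no TLB invalidates), then DAC for devices that can
   address it, and only then a scatter-gather arena mapping that
   consumes IOMMU ptes.  */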
/* Helper for generic DMA-mapping functions. */
static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
{
	if (dev && dev_is_pci(dev))
		return to_pci_dev(dev);

	/* Assume that non-PCI devices asking for DMA are either ISA or EISA,
	   BUG() otherwise. */
	BUG_ON(!isa_bridge);

	/* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
	   bridge is bus master then). */
	if (!dev || !dev->dma_mask || !*dev->dma_mask)
		return isa_bridge;

	/* For EISA bus masters, return isa_bridge (it might have smaller
	   dma_mask due to wiring limitations). */
	if (*dev->dma_mask >= isa_bridge->dma_mask)
		return isa_bridge;

	/* This assumes ISA bus master with dma_mask 0xffffff. */
	return NULL;
}
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);

	BUG_ON(dir == DMA_NONE);

	dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
	return pci_map_single_1(pdev, (char *)page_address(page) + offset,
/* Unmap a single streaming mode DMA translation.  The DMA_ADDR and
   SIZE must match what was provided for in a previous pci_map_single
   call.  All other usages are undefined.  After this call, reads by
   the cpu to the buffer are guaranteed to see whatever the device
   wrote there.  */
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction dir,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
	struct pci_iommu_arena *arena;
	long dma_ofs, npages;

	BUG_ON(dir == DMA_NONE);

	if (dma_addr >= __direct_map_base
	    && dma_addr < __direct_map_base + __direct_map_size) {

		DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

	if (dma_addr > 0xffffffff) {
		DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
		      dma_addr, size, __builtin_return_address(0));

	arena = hose->sg_pci;
	if (!arena || dma_addr < arena->dma_base)
		arena = hose->sg_isa;

	dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
	if (dma_ofs * PAGE_SIZE >= arena->size) {
		printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
		       " base %llx size %x\n",
		       dma_addr, arena->dma_base, arena->size);

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	spin_lock_irqsave(&arena->lock, flags);

	iommu_arena_free(arena, dma_ofs, npages);

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter. */
	if (dma_ofs >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
	      dma_addr, size, npages, __builtin_return_address(0));
/* Allocate and map kernel buffer using consistent mode DMA for PCI
   device.  Returns non-NULL cpu-view pointer to the buffer if
   successful and sets *DMA_ADDRP to the pci side dma address as well,
   else DMA_ADDRP is undefined.  */

static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_addrp, gfp_t gfp,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	long order = get_order(size);

	cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
		printk(KERN_INFO "pci_alloc_consistent: "
		       "get_free_pages failed from %ps\n",
		       __builtin_return_address(0));
		/* ??? Really atomic allocation?  Otherwise we could play
		   with vmalloc and sg if we can't find contiguous memory. */

	memset(cpu_addr, 0, size);

	*dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
	if (*dma_addrp == DMA_MAPPING_ERROR) {
		free_pages((unsigned long)cpu_addr, order);
		if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
		/* The address doesn't fit the required mask and we
		   do not have an iommu.  Try again with GFP_DMA. */

	DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
	      size, cpu_addr, *dma_addrp, __builtin_return_address(0));
/* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
   be values that were returned from pci_alloc_consistent.  SIZE must
   be the same as what was passed into pci_alloc_consistent.
   References to the memory and mappings associated with CPU_ADDR or
   DMA_ADDR past this call are illegal.  */

static void alpha_pci_free_coherent(struct device *dev, size_t size,
				    void *cpu_addr, dma_addr_t dma_addr,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);

	dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long)cpu_addr, get_order(size));

	DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
	      dma_addr, size, __builtin_return_address(0));
/* Classify the elements of the scatterlist.  Write dma_address
   of each element with:
	0   : Followers all physically adjacent.
	1   : Followers all virtually adjacent.
	-1  : Not leader, physically adjacent to previous.
	-2  : Not leader, virtually adjacent to previous.
   Write dma_length of each leader with the combined lengths of
   the mergeable followers.  */

#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
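/* Worked example (hypothetical buffers): two entries A and B where B's
   physical address immediately follows A's.  sg_classify() below leaves
   A.dma_address = 0 (leader of a physically contiguous run),
   B.dma_address = -1, and A.dma_length = A.length + B.length.  If
   instead A ended on a page boundary and B started on one (contiguous
   only through the IOMMU), B would get -2 and A would get 1.  */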
static void
sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
	    int virt_ok)
{
	unsigned long next_paddr;
	struct scatterlist *leader;
	long leader_flag, leader_length;
	unsigned int max_seg_size;

	leader_length = leader->length;
	next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;

	/* we will not merge sg entries without a device. */
	max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
	for (++sg; sg < end; ++sg) {
		unsigned long addr, len;
		addr = SG_ENT_PHYS_ADDRESS(sg);

		if (leader_length + len > max_seg_size)

		if (next_paddr == addr) {
			sg->dma_address = -1;
			leader_length += len;
		} else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
			sg->dma_address = -2;
			leader_length += len;

			leader->dma_address = leader_flag;
			leader->dma_length = leader_length;

		next_paddr = addr + len;

	leader->dma_address = leader_flag;
	leader->dma_length = leader_length;
/* Given a scatterlist leader, choose an allocation method and fill
   in the blanks.  */

static int
sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
	struct scatterlist *out, struct pci_iommu_arena *arena,
	dma_addr_t max_dma, int dac_allowed)
{
	unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
	long size = leader->dma_length;
	struct scatterlist *sg;
	long npages, dma_ofs, i;

	/* If everything is physically contiguous, and the addresses
	   fall into the direct-map window, use it. */
	if (leader->dma_address == 0
	    && paddr + size + __direct_map_base - 1 <= max_dma
	    && paddr + size <= __direct_map_size) {
		out->dma_address = paddr + __direct_map_base;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
		     __va(paddr), size, out->dma_address);

	/* If physically contiguous and DAC is available, use it. */
	if (leader->dma_address == 0 && dac_allowed) {
		out->dma_address = paddr + alpha_mv.pci_dac_offset;
		out->dma_length = size;

		DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
		     __va(paddr), size, out->dma_address);

	/* Otherwise, we'll use the iommu to make the pages virtually
	   contiguous.  */

	paddr &= ~PAGE_MASK;
	npages = iommu_num_pages(paddr, size, PAGE_SIZE);
	dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
	if (dma_ofs < 0) {
		/* If we attempted a direct map above but failed, die.  */
		if (leader->dma_address == 0)
			return -1;

		/* Otherwise, break up the remaining virtually contiguous
		   hunks into individual direct maps and retry.  */
		sg_classify(dev, leader, end, 0);
		return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
	}

	out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
	out->dma_length = size;

	DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
	     __va(paddr), size, out->dma_address, npages);

	/* All virtually contiguous.  We need to find the length of each
	   physically contiguous subsegment to fill in the ptes. */
	ptes = &arena->ptes[dma_ofs];
	sg = leader;
	do {
		struct scatterlist *last_sg = sg;

		paddr = SG_ENT_PHYS_ADDRESS(sg);

		while (sg+1 < end && (int) sg[1].dma_address == -1) {
			size += sg[1].length;

		npages = iommu_num_pages(paddr, size, PAGE_SIZE);

		for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
			*ptes++ = mk_iommu_pte(paddr);

		DBGA(" (%ld) [%p,%x] np %ld\n",
		     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
		     last_sg->length, npages);
		while (++last_sg <= sg) {
			DBGA(" (%ld) [%p,%x] cont\n",
			     last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),

	} while (++sg < end && (int) sg->dma_address < 0);
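/* In the SG case a single arena allocation covers the whole virtually
   contiguous run: the do/while above walks each physically contiguous
   subsegment and writes one pte per page, so the device sees one linear
   DMA range even though the underlying pages are scattered.  */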
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction dir,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct scatterlist *start, *end, *out;
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	BUG_ON(dir == DMA_NONE);

	dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;

	/* Fast path single entry scatterlists. */
		sg->dma_length = sg->length;
		sg->dma_address
			= pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
					   sg->length, dac_allowed);
		if (sg->dma_address == DMA_MAPPING_ERROR)

	/* First, prepare information about the entries. */
	sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);

	/* Second, figure out where we're going to map things. */
	if (alpha_mv.mv_pci_tbi) {
		hose = pdev ? pdev->sysdata : pci_isa_hose;
		max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
		arena = hose->sg_pci;
		if (!arena || arena->dma_base + arena->size - 1 > max_dma)
			arena = hose->sg_isa;

	/* Third, iterate over the scatterlist leaders and allocate
	   dma space as needed. */
	for (out = sg; sg < end; ++sg) {
		if ((int) sg->dma_address < 0)
			continue;
		if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)

	/* Mark the end of the list for pci_unmap_sg. */

	if (out - start == 0) {
		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");

	DBGA("pci_map_sg: %ld entries\n", out - start);

	printk(KERN_WARNING "pci_map_sg failed: "
	       "could not allocate dma page tables\n");

	/* Some allocation failed while mapping the scatterlist
	   entries.  Unmap them now. */
	dma_unmap_sg(&pdev->dev, start, out - start, dir);
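/* Successfully mapped entries are written back compactly from the head
   of the list, one per leader, so the count handed back to the caller
   is out - start rather than nents.  On failure everything mapped so
   far is undone with dma_unmap_sg() above.  */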
/* Unmap a set of streaming mode DMA translations.  Again, cpu read
   rules concerning calls here are the same as for pci_unmap_single()
   above.  */

static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir,
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;
	struct scatterlist *end;
	dma_addr_t fbeg, fend;

	BUG_ON(dir == DMA_NONE);

	if (! alpha_mv.mv_pci_tbi)

	hose = pdev ? pdev->sysdata : pci_isa_hose;
	max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
	arena = hose->sg_pci;
	if (!arena || arena->dma_base + arena->size - 1 > max_dma)
		arena = hose->sg_isa;

	spin_lock_irqsave(&arena->lock, flags);

	for (end = sg + nents; sg < end; ++sg) {
		addr = sg->dma_address;
		size = sg->dma_length;

		if (addr > 0xffffffff) {
			/* It's a DAC address -- nothing to do. */
			DBGA(" (%ld) DAC [%llx,%zx]\n",
			     sg - end + nents, addr, size);

		if (addr >= __direct_map_base
		    && addr < __direct_map_base + __direct_map_size) {
			DBGA(" (%ld) direct [%llx,%zx]\n",
			     sg - end + nents, addr, size);

		DBGA(" (%ld) sg [%llx,%zx]\n",
		     sg - end + nents, addr, size);

		npages = iommu_num_pages(addr, size, PAGE_SIZE);
		ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
		iommu_arena_free(arena, ofs, npages);

		tend = addr + size - 1;
		if (fbeg > addr) fbeg = addr;
		if (fend < tend) fend = tend;

	/* If we're freeing ptes above the `next_entry' pointer (they
	   may have snuck back into the TLB since the last wrap flush),
	   we need to flush the TLB before reallocating the latter. */
	if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
		alpha_mv.mv_pci_tbi(hose, fbeg, fend);

	spin_unlock_irqrestore(&arena->lock, flags);

	DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
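/* The per-entry frees above only clear ptes; the chipset TLB is then
   invalidated once over the whole freed range [fbeg, fend], and only if
   entries at or above next_entry were freed -- the same rule used in
   alpha_pci_unmap_page().  */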
/* Return whether the given PCI device DMA address mask can be
   supported properly.  */

static int alpha_pci_supported(struct device *dev, u64 mask)
{
	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
	struct pci_controller *hose;
	struct pci_iommu_arena *arena;

	/* If there exists a direct map, and the mask fits either
	   the entire direct mapped space or the total system memory as
	   shifted by the map base */
	if (__direct_map_size != 0
	    && (__direct_map_base + __direct_map_size - 1 <= mask ||
		__direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
		return 1;

	/* Check that we have a scatter-gather arena that fits. */
	hose = pdev ? pdev->sysdata : pci_isa_hose;
	arena = hose->sg_isa;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;
	arena = hose->sg_pci;
	if (arena && arena->dma_base + arena->size - 1 <= mask)
		return 1;

	/* As a last resort, try ZONE_DMA. */
	if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
		return 1;

	return 0;
}
/*
 * AGP GART extensions to the IOMMU
 */
iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	/* Search for N empty ptes.  */
	p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
	spin_unlock_irqrestore(&arena->lock, flags);

	/* Success.  Mark them all reserved (ie not zero and invalid)
	   for the iommu tlb that could load them from under us.
	   They will be filled in with valid bits by _bind() */
	for (i = 0; i < pg_count; ++i)
		ptes[p+i] = IOMMU_RESERVED_PTE;

	arena->next_entry = p + pg_count;
	spin_unlock_irqrestore(&arena->lock, flags);

iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
	if (!arena) return -EINVAL;

	/* Make sure they're all reserved first... */
	for(i = pg_start; i < pg_start + pg_count; i++)
		if (ptes[i] != IOMMU_RESERVED_PTE)

	iommu_arena_free(arena, pg_start, pg_count);

iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
	if (!arena) return -EINVAL;

	spin_lock_irqsave(&arena->lock, flags);

	for(j = pg_start; j < pg_start + pg_count; j++) {
		if (ptes[j] != IOMMU_RESERVED_PTE) {
			spin_unlock_irqrestore(&arena->lock, flags);

	for(i = 0, j = pg_start; i < pg_count; i++, j++)
		ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));

	spin_unlock_irqrestore(&arena->lock, flags);

iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
	if (!arena) return -EINVAL;

	p = arena->ptes + pg_start;
	for(i = 0; i < pg_count; i++)
		p[i] = IOMMU_RESERVED_PTE;
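/* Lifecycle used by the AGP GART code: iommu_reserve() stakes out a run
   of ptes and marks them IOMMU_RESERVED_PTE so normal DMA allocation
   will not hand them out, iommu_bind() points them at real pages,
   iommu_unbind() returns them to the reserved state, and
   iommu_release() finally frees the range.  */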
const struct dma_map_ops alpha_pci_ops = {
	.alloc			= alpha_pci_alloc_coherent,
	.free			= alpha_pci_free_coherent,
	.map_page		= alpha_pci_map_page,
	.unmap_page		= alpha_pci_unmap_page,
	.map_sg			= alpha_pci_map_sg,
	.unmap_sg		= alpha_pci_unmap_sg,
	.dma_supported		= alpha_pci_supported,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(alpha_pci_ops);