/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
	void __iomem *regs;
	struct device *dev;

	struct tegra_mc *mc;
	const struct tegra_smmu_soc *soc;

	unsigned long pfn_mask;
	unsigned long tlb_mask;

	unsigned long *asids;
	struct mutex lock;

	struct list_head list;

	struct dentry *debugfs;

	struct iommu_device iommu;	/* IOMMU Core code handle */
};

struct tegra_smmu_as {
	struct iommu_domain domain;
	struct tegra_smmu *smmu;
	unsigned int use_count;
	u32 *count;		/* per-PDE count of active PTEs */
	struct page **pts;	/* page table pages, indexed by PDE */
	struct page *pd;	/* page directory page */
	dma_addr_t pd_dma;	/* DMA address of the page directory */
	unsigned id;		/* ASID assigned to this address space */
	u32 attr;		/* attributes for page directory entries */
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
	return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
			       unsigned long offset)
{
	writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
	return readl(smmu->regs + offset);
}
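
/*
 * Register definitions. All offsets are relative to the memory controller's
 * register space, which the SMMU shares (smmu->regs is set to mc->regs in
 * tegra_smmu_probe()).
 */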
#define SMMU_CONFIG 0x010
#define  SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define  SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define  SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define  SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)
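
/*
 * Both the page table base (SMMU_PTB_DATA) and page directory entries encode
 * a 4 KiB-aligned physical address in their low bits (address >> 12), with
 * attribute flags occupying the topmost bits.
 */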
#define SMMU_PTB_DATA 0x020
#define  SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define  SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define  SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define  SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define  SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
					  SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define  SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define  SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define  SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define  SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
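
/*
 * The SMMU uses a two-level page table: bits [31:22] of an IOVA index one of
 * 1024 page directory entries, each covering a 4 MiB section; bits [21:12]
 * index one of 1024 page table entries, each mapping a 4 KiB page; bits
 * [11:0] are the offset into the page. For example, IOVA 0x12345678 decodes
 * to PDE 0x048, PTE 0x345, offset 0x678.
 */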
#define SMMU_PD_READABLE	(1 << 31)
#define SMMU_PD_WRITABLE	(1 << 30)
#define SMMU_PD_NONSECURE	(1 << 29)

#define SMMU_PDE_READABLE	(1 << 31)
#define SMMU_PDE_WRITABLE	(1 << 30)
#define SMMU_PDE_NONSECURE	(1 << 29)
#define SMMU_PDE_NEXT		(1 << 28)

#define SMMU_PTE_READABLE	(1 << 31)
#define SMMU_PTE_WRITABLE	(1 << 30)
#define SMMU_PTE_NONSECURE	(1 << 29)

#define SMMU_PDE_ATTR		(SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
				 SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR		(SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
				 SMMU_PTE_NONSECURE)

static unsigned int iova_pd_index(unsigned long iova)
{
	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}
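
/*
 * Page tables and the page directory must be addressable through the PFN
 * field of a PDE (or SMMU_PTB_DATA), so their DMA addresses may not use more
 * bits than the PFN mask covers.
 */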
static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
	addr >>= 12;
	return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
	return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}
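
/*
 * Flush the page table cache (PTC) entry for a single page table location.
 * The offset is aligned down to the memory controller's atom size, since the
 * PTC appears to cache page table contents at that granularity.
 */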
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
				  unsigned long offset)
{
	u32 value;

	offset &= ~(smmu->mc->soc->atom_size - 1);

	if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
		value = 0;
#endif
		smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
	}

	value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
	smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
	smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
				       unsigned long asid)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
					  unsigned long asid,
					  unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
					unsigned long asid,
					unsigned long iova)
{
	u32 value;

	if (smmu->soc->num_asids == 4)
		value = (asid & 0x3) << 29;
	else
		value = (asid & 0x7f) << 24;

	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
	smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}
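
/*
 * Read back SMMU_CONFIG so that the preceding register writes are guaranteed
 * to have reached the hardware before execution continues.
 */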
static inline void smmu_flush(struct tegra_smmu *smmu)
{
	smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
	unsigned long id;

	mutex_lock(&smmu->lock);

	id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
	if (id >= smmu->soc->num_asids) {
		mutex_unlock(&smmu->lock);
		return -ENOSPC;
	}

	set_bit(id, smmu->asids);
	*idp = id;

	mutex_unlock(&smmu->lock);
	return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
	mutex_lock(&smmu->lock);
	clear_bit(id, smmu->asids);
	mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
	return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
	struct tegra_smmu_as *as;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

	as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
	if (!as->pd) {
		kfree(as);
		return NULL;
	}

	as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
	if (!as->count) {
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
	if (!as->pts) {
		kfree(as->count);
		__free_page(as->pd);
		kfree(as);
		return NULL;
	}

	/* setup aperture */
	as->domain.geometry.aperture_start = 0;
	as->domain.geometry.aperture_end = 0xffffffff;
	as->domain.geometry.force_aperture = true;

	return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);

	/* TODO: free page directory and page tables */

	kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
	const struct tegra_smmu_swgroup *group = NULL;
	unsigned int i;

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		if (smmu->soc->swgroups[i].swgroup == swgroup) {
			group = &smmu->soc->swgroups[i];
			break;
		}
	}

	return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
			      unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value |= BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value |= SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
			       unsigned int asid)
{
	const struct tegra_smmu_swgroup *group;
	unsigned int i;
	u32 value;

	group = tegra_smmu_find_swgroup(smmu, swgroup);
	if (group) {
		value = smmu_readl(smmu, group->reg);
		value &= ~SMMU_ASID_MASK;
		value |= SMMU_ASID_VALUE(asid);
		value &= ~SMMU_ASID_ENABLE;
		smmu_writel(smmu, value, group->reg);
	}

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];

		if (client->swgroup != swgroup)
			continue;

		value = smmu_readl(smmu, client->smmu.reg);
		value &= ~BIT(client->smmu.bit);
		smmu_writel(smmu, value, client->smmu.reg);
	}
}
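
/*
 * The first device to attach to an address space maps the page directory for
 * DMA, allocates an ASID and programs the page table base; subsequent
 * attachments only increment the use count.
 */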
static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
				 struct tegra_smmu_as *as)
{
	u32 value;
	int err;

	if (as->use_count > 0) {
		as->use_count++;
		return 0;
	}

	as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, as->pd_dma))
		return -ENOMEM;

	/* We can't handle 64-bit DMA addresses */
	if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
		err = -ENOMEM;
		goto err_unmap;
	}

	err = tegra_smmu_alloc_asid(smmu, &as->id);
	if (err < 0)
		goto err_unmap;

	smmu_flush_ptc(smmu, as->pd_dma, 0);
	smmu_flush_tlb_asid(smmu, as->id);

	smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
	value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
	smmu_writel(smmu, value, SMMU_PTB_DATA);
	smmu_flush(smmu);

	as->smmu = smmu;
	as->use_count++;

	return 0;

err_unmap:
	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
	return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
				    struct tegra_smmu_as *as)
{
	if (--as->use_count > 0)
		return;

	tegra_smmu_free_asid(smmu, as->id);

	dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

	as->smmu = NULL;
}
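
/*
 * Masters reference the SMMU via "iommus" phandles with a single specifier
 * cell carrying the swgroup ID, e.g. (sketch of an assumed device tree
 * fragment; node names and labels are illustrative only):
 *
 *	dc@54200000 {
 *		iommus = <&mc TEGRA_SWGROUP_DC>;
 *	};
 *
 * args.args[0] below is that swgroup ID.
 */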
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	unsigned int index = 0;
	int err;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		err = tegra_smmu_as_prepare(smmu, as);
		if (err < 0)
			return err;

		tegra_smmu_enable(smmu, swgroup, as->id);
		index++;
	}

	if (index == 0)
		return -ENODEV;

	return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	struct device_node *np = dev->of_node;
	struct tegra_smmu *smmu = as->smmu;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					   &args)) {
		unsigned int swgroup = args.args[0];

		if (args.np != smmu->dev->of_node) {
			of_node_put(args.np);
			continue;
		}

		of_node_put(args.np);

		tegra_smmu_disable(smmu, swgroup, as->id);
		tegra_smmu_as_unprepare(smmu, as);
		index++;
	}
}
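
/*
 * Updating a page directory entry requires the CPU write to become visible
 * to the device (cache maintenance) before stale copies are purged from the
 * page table cache and the TLB, in that order.
 */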
static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
			       u32 value)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	u32 *pd = page_address(as->pd);
	unsigned long offset = pd_index * sizeof(*pd);

	/* Set the page directory entry first */
	pd[pd_index] = value;

	/* Then flush the page directory entry from caches */
	dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
					 sizeof(*pd), DMA_TO_DEVICE);

	/* And flush the iommu */
	smmu_flush_ptc(smmu, as->pd_dma, offset);
	smmu_flush_tlb_section(smmu, as->id, iova);
	smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
	u32 *pt = page_address(pt_page);

	return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
				  dma_addr_t *dmap)
{
	unsigned int pd_index = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;
	struct page *pt_page;
	u32 *pd;

	pt_page = as->pts[pd_index];
	if (!pt_page)
		return NULL;

	pd = page_address(as->pd);
	*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

	return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
		       dma_addr_t *dmap)
{
	unsigned int pde = iova_pd_index(iova);
	struct tegra_smmu *smmu = as->smmu;

	if (!as->pts[pde]) {
		struct page *page;
		dma_addr_t dma;

		page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
		if (!page)
			return NULL;

		dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
				   DMA_TO_DEVICE);
		if (dma_mapping_error(smmu->dev, dma)) {
			__free_page(page);
			return NULL;
		}

		if (!smmu_dma_addr_valid(smmu, dma)) {
			dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
				       DMA_TO_DEVICE);
			__free_page(page);
			return NULL;
		}

		as->pts[pde] = page;

		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
							      SMMU_PDE_NEXT));

		*dmap = dma;
	} else {
		u32 *pd = page_address(as->pd);

		*dmap = smmu_pde_to_dma(smmu, pd[pde]);
	}

	return tegra_smmu_pte_offset(as->pts[pde], iova);
}
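
/*
 * Each page table page is reference-counted via as->count so that it can be
 * unmapped and returned to the system once its last PTE is cleared.
 */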
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pd_index = iova_pd_index(iova);

	as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
{
	unsigned int pde = iova_pd_index(iova);
	struct page *page = as->pts[pde];

	/*
	 * When no entries in this page table are used anymore, return the
	 * memory page to the system.
	 */
	if (--as->count[pde] == 0) {
		struct tegra_smmu *smmu = as->smmu;
		u32 *pd = page_address(as->pd);
		dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);

		tegra_smmu_set_pde(as, iova, 0);

		dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
		__free_page(page);
		as->pts[pde] = NULL;
	}
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
			       u32 *pte, dma_addr_t pte_dma, u32 val)
{
	struct tegra_smmu *smmu = as->smmu;
	unsigned long offset = offset_in_page(pte);

	*pte = val;

	dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
					 4, DMA_TO_DEVICE);
	smmu_flush_ptc(smmu, pte_dma, offset);
	smmu_flush_tlb_group(smmu, as->id, iova);
	smmu_flush(smmu);
}
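
/*
 * Map a single 4 KiB page. The PTE encodes the physical page frame number in
 * its low bits, with read/write/non-secure attributes in the top bits.
 */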
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size, int prot)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = as_get_pte(as, iova, &pte_dma);
	if (!pte)
		return -ENOMEM;

	/* If we aren't overwriting a pre-existing entry, increment use */
	if (*pte == 0)
		tegra_smmu_pte_get_use(as, iova);

	tegra_smmu_set_pte(as, iova, pte, pte_dma,
			   __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

	return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			       size_t size)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
	tegra_smmu_pte_put_use(as, iova);

	return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
{
	struct tegra_smmu_as *as = to_smmu_as(domain);
	unsigned long pfn;
	dma_addr_t pte_dma;
	u32 *pte;

	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
	if (!pte || !*pte)
		return 0;

	pfn = *pte & as->smmu->pfn_mask;

	return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
	struct platform_device *pdev;
	struct tegra_mc *mc;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		return NULL;

	mc = platform_get_drvdata(pdev);
	if (!mc)
		return NULL;

	return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct iommu_group *group;
	struct of_phandle_args args;
	unsigned int index = 0;

	while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
					  &args) == 0) {
		struct tegra_smmu *smmu;

		smmu = tegra_smmu_find(args.np);
		if (smmu) {
			/*
			 * Only a single IOMMU master interface is currently
			 * supported by the Linux kernel, so abort after the
			 * first match.
			 */
			dev->archdata.iommu = smmu;

			iommu_device_link(&smmu->iommu, dev);

			break;
		}

		of_node_put(args.np);
		index++;
	}

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
	struct tegra_smmu *smmu = dev->archdata.iommu;

	if (smmu)
		iommu_device_unlink(&smmu->iommu, dev);

	dev->archdata.iommu = NULL;
	iommu_group_remove_device(dev);
}
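
/*
 * Note that pgsize_bitmap only advertises 4 KiB pages: the IOMMU core will
 * split larger requests into 4 KiB calls to .map()/.unmap().
 */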
static const struct iommu_ops tegra_smmu_ops = {
	.capable = tegra_smmu_capable,
	.domain_alloc = tegra_smmu_domain_alloc,
	.domain_free = tegra_smmu_domain_free,
	.attach_dev = tegra_smmu_attach_dev,
	.detach_dev = tegra_smmu_detach_dev,
	.add_device = tegra_smmu_add_device,
	.remove_device = tegra_smmu_remove_device,
	.device_group = generic_device_group,
	.map = tegra_smmu_map,
	.unmap = tegra_smmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = tegra_smmu_iova_to_phys,

	.pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
	static const struct of_device_id ahb_match[] = {
		{ .compatible = "nvidia,tegra30-ahb", },
		{ }
	};
	struct device_node *ahb;

	ahb = of_find_matching_node(NULL, ahb_match);
	if (ahb) {
		tegra_ahb_enable_smmu(ahb);
		of_node_put(ahb);
	}
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "swgroup    enabled  ASID\n");
	seq_printf(s, "------------------------\n");

	for (i = 0; i < smmu->soc->num_swgroups; i++) {
		const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
		const char *status;
		unsigned int asid;

		value = smmu_readl(smmu, group->reg);

		if (value & SMMU_ASID_ENABLE)
			status = "yes";
		else
			status = "no";

		asid = value & SMMU_ASID_MASK;

		seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
			   asid);
	}

	return 0;
}

static int tegra_smmu_swgroups_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_swgroups_show, inode->i_private);
}

static const struct file_operations tegra_smmu_swgroups_fops = {
	.open = tegra_smmu_swgroups_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
	struct tegra_smmu *smmu = s->private;
	unsigned int i;
	u32 value;

	seq_printf(s, "client       enabled\n");
	seq_printf(s, "--------------------\n");

	for (i = 0; i < smmu->soc->num_clients; i++) {
		const struct tegra_mc_client *client = &smmu->soc->clients[i];
		const char *status;

		value = smmu_readl(smmu, client->smmu.reg);

		if (value & BIT(client->smmu.bit))
			status = "yes";
		else
			status = "no";

		seq_printf(s, "%-12s %s\n", client->name, status);
	}

	return 0;
}

static int tegra_smmu_clients_open(struct inode *inode, struct file *file)
{
	return single_open(file, tegra_smmu_clients_show, inode->i_private);
}

static const struct file_operations tegra_smmu_clients_fops = {
	.open = tegra_smmu_clients_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
	smmu->debugfs = debugfs_create_dir("smmu", NULL);
	if (!smmu->debugfs)
		return;

	debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_swgroups_fops);
	debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
			    &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
	debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
				    const struct tegra_smmu_soc *soc,
				    struct tegra_mc *mc)
{
	struct tegra_smmu *smmu;
	size_t size;
	u32 value;
	int err;

	/* This can happen on Tegra20 which doesn't have an SMMU */
	if (!soc)
		return NULL;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu)
		return ERR_PTR(-ENOMEM);

	/*
	 * This is a bit of a hack. Ideally we'd want to simply return this
	 * value. However the IOMMU registration process will attempt to add
	 * all devices to the IOMMU when bus_set_iommu() is called. In order
	 * not to rely on global variables to track the IOMMU instance, we
	 * set it here so that it can be looked up from the .add_device()
	 * callback via the IOMMU device's .drvdata field.
	 */
	mc->smmu = smmu;

	size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

	smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!smmu->asids)
		return ERR_PTR(-ENOMEM);

	mutex_init(&smmu->lock);

	smmu->regs = mc->regs;
	smmu->soc = soc;
	smmu->dev = dev;
	smmu->mc = mc;

	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
		mc->soc->num_address_bits, smmu->pfn_mask);
	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
		smmu->tlb_mask);

	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

	if (soc->supports_request_limit)
		value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

	smmu_writel(smmu, value, SMMU_PTC_CONFIG);

	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

	if (soc->supports_round_robin_arbitration)
		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

	smmu_writel(smmu, value, SMMU_TLB_CONFIG);

	smmu_flush_ptc_all(smmu);
	smmu_flush_tlb(smmu);
	smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
	smmu_flush(smmu);

	tegra_smmu_ahb_enable();

	err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return ERR_PTR(err);

	iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);

	err = iommu_device_register(&smmu->iommu);
	if (err) {
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
	if (err < 0) {
		iommu_device_unregister(&smmu->iommu);
		iommu_device_sysfs_remove(&smmu->iommu);
		return ERR_PTR(err);
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_init(smmu);

	return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
	iommu_device_unregister(&smmu->iommu);
	iommu_device_sysfs_remove(&smmu->iommu);

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_smmu_debugfs_exit(smmu);
}