Author | Tokens | Token Proportion | Commits | Commit Proportion
---|---|---|---|---
Thierry Reding | 2902 | 58.69% | 8 | 18.18%
Russell King | 926 | 18.73% | 13 | 29.55%
Hiroshi Doyu | 787 | 15.92% | 10 | 22.73%
Joerg Roedel | 246 | 4.97% | 8 | 18.18%
Robin Murphy | 39 | 0.79% | 1 | 2.27%
Stephen Warren | 33 | 0.67% | 1 | 2.27%
Yangtao Li | 6 | 0.12% | 1 | 2.27%
Al Viro | 3 | 0.06% | 1 | 2.27%
Wei Yongjun | 3 | 0.06% | 1 | 2.27%
**Total** | **4945** | **100.00%** | **44** | **100.00%**
```c
/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu_group {
        struct list_head list;
        const struct tegra_smmu_group_soc *soc;
        struct iommu_group *group;
};

struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        struct list_head groups;

        unsigned long pfn_mask;
        unsigned long tlb_mask;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;

        struct dentry *debugfs;

        struct iommu_device iommu;      /* IOMMU Core code handle */
};

struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        u32 *count;
        struct page **pts;
        struct page *pd;
        dma_addr_t pd_dma;
        unsigned id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(dma, attr) ((dma) >> 12 | (attr))

#define SMMU_MK_PDE(dma, attr) ((dma) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL     (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP   (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x)          (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr)   ((((addr) & 0xffffc000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH       (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE        (1 << 31)
#define SMMU_PD_WRITABLE        (1 << 30)
#define SMMU_PD_NONSECURE       (1 << 29)

#define SMMU_PDE_READABLE       (1 << 31)
#define SMMU_PDE_WRITABLE       (1 << 30)
#define SMMU_PDE_NONSECURE      (1 << 29)
#define SMMU_PDE_NEXT           (1 << 28)

#define SMMU_PTE_READABLE       (1 << 31)
#define SMMU_PTE_WRITABLE       (1 << 30)
#define SMMU_PTE_NONSECURE      (1 << 29)

#define SMMU_PDE_ATTR           (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                                 SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR           (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                                 SMMU_PTE_NONSECURE)
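/*
 * Layout of a 32-bit IOVA, as implied by the definitions above:
 *
 *   bits 31:22 - page directory index (SMMU_PDE_SHIFT, 1024 entries)
 *   bits 21:12 - page table index (SMMU_PTE_SHIFT, 1024 entries)
 *   bits 11:0  - offset into the 4 KiB page
 *
 * Each second-level page table therefore covers 4 MiB of IOVA space;
 * e.g. IOVA 0x12345678 resolves to PD index 0x048 and PT index 0x345.
 */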
static unsigned int iova_pd_index(unsigned long iova)
{
        return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
}

static unsigned int iova_pt_index(unsigned long iova)
{
        return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
}

static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
{
        addr >>= 12;
        return (addr & smmu->pfn_mask) == addr;
}

static dma_addr_t smmu_pde_to_dma(u32 pde)
{
        return pde << 12;
}

static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_ptc(struct tegra_smmu *smmu, dma_addr_t dma,
                                  unsigned long offset)
{
        u32 value;

        offset &= ~(smmu->mc->soc->atom_size - 1);

        if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                value = 0;
#endif
                smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        value = (dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_CONFIG);
}
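/*
 * The flush registers above are posted writes; smmu_flush() reads
 * SMMU_CONFIG back so that all preceding PTC/TLB flush writes are
 * guaranteed to have reached the hardware before the caller proceeds.
 */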
static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);
                return -ENOSPC;
        }

        set_bit(id, smmu->asids);
        *idp = id;

        mutex_unlock(&smmu->lock);
        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}

static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL);
        if (!as->pts) {
                kfree(as->count);
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */

        kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma))
                return -ENOMEM;

        /* We can't handle 64-bit DMA addresses */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
                err = -ENOMEM;
                goto err_unmap;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                goto err_unmap;

        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;

err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
        return err;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);

        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);

        as->smmu = NULL;
}
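/*
 * An address space is programmed into the hardware (PTB_ASID/PTB_DATA)
 * only on its first attachment; later attachments merely bump use_count,
 * and the ASID is released again once the count drops back to zero.
 */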
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                           index, &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);
                if (err < 0)
                        return err;

                tegra_smmu_enable(smmu, swgroup, as->id);
                index++;
        }

        if (index == 0)
                return -ENODEV;

        return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain,
                                  struct device *dev)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                           index, &args)) {
                unsigned int swgroup = args.args[0];

                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        continue;
                }

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);
                index++;
        }
}

static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;
        u32 *pd = page_address(as->pd);
        unsigned long offset = pd_index * sizeof(*pd);

        /* Set the page directory entry first */
        pd[pd_index] = value;

        /* Then flush the page directory entry from caches */
        dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
                                         sizeof(*pd), DMA_TO_DEVICE);

        /* And flush the iommu */
        smmu_flush_ptc(smmu, as->pd_dma, offset);
        smmu_flush_tlb_section(smmu, as->id, iova);
        smmu_flush(smmu);
}

static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
{
        u32 *pt = page_address(pt_page);

        return pt + iova_pt_index(iova);
}

static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
                                  dma_addr_t *dmap)
{
        unsigned int pd_index = iova_pd_index(iova);
        struct page *pt_page;
        u32 *pd;

        pt_page = as->pts[pd_index];
        if (!pt_page)
                return NULL;

        pd = page_address(as->pd);
        *dmap = smmu_pde_to_dma(pd[pd_index]);

        return tegra_smmu_pte_offset(pt_page, iova);
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       dma_addr_t *dmap)
{
        unsigned int pde = iova_pd_index(iova);
        struct tegra_smmu *smmu = as->smmu;

        if (!as->pts[pde]) {
                struct page *page;
                dma_addr_t dma;

                page = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO);
                if (!page)
                        return NULL;

                dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT,
                                   DMA_TO_DEVICE);
                if (dma_mapping_error(smmu->dev, dma)) {
                        __free_page(page);
                        return NULL;
                }

                if (!smmu_dma_addr_valid(smmu, dma)) {
                        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT,
                                       DMA_TO_DEVICE);
                        __free_page(page);
                        return NULL;
                }

                as->pts[pde] = page;

                tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
                                                              SMMU_PDE_NEXT));

                *dmap = dma;
        } else {
                u32 *pd = page_address(as->pd);

                *dmap = smmu_pde_to_dma(pd[pde]);
        }

        return tegra_smmu_pte_offset(as->pts[pde], iova);
}
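/*
 * Second-level page tables are allocated lazily: the first mapping into
 * a 4 MiB region allocates the table (__GFP_DMA keeps it within the
 * range the hardware can address, as checked by smmu_dma_addr_valid())
 * and hooks it up through a PDE with the NEXT bit set.
 */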
static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as,
                                   unsigned long iova)
{
        unsigned int pd_index = iova_pd_index(iova);

        as->count[pd_index]++;
}

static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as,
                                   unsigned long iova)
{
        unsigned int pde = iova_pd_index(iova);
        struct page *page = as->pts[pde];

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (--as->count[pde] == 0) {
                struct tegra_smmu *smmu = as->smmu;
                u32 *pd = page_address(as->pd);
                dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);

                tegra_smmu_set_pde(as, iova, 0);

                dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT,
                               DMA_TO_DEVICE);
                __free_page(page);
                as->pts[pde] = NULL;
        }
}

static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
                               u32 *pte, dma_addr_t pte_dma, u32 val)
{
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset = offset_in_page(pte);

        *pte = val;

        dma_sync_single_range_for_device(smmu->dev, pte_dma, offset,
                                         4, DMA_TO_DEVICE);
        smmu_flush_ptc(smmu, pte_dma, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);
}

static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = as_get_pte(as, iova, &pte_dma);
        if (!pte)
                return -ENOMEM;

        /* If we aren't overwriting a pre-existing entry, increment use */
        if (*pte == 0)
                tegra_smmu_pte_get_use(as, iova);

        tegra_smmu_set_pte(as, iova, pte, pte_dma,
                           __phys_to_pfn(paddr) | SMMU_PTE_ATTR);

        return 0;
}
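/*
 * Note that the prot argument is not consulted above: every mapping is
 * installed readable, writable and non-secure (SMMU_PTE_ATTR).
 */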
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
        tegra_smmu_pte_put_use(as, iova);

        return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long pfn;
        dma_addr_t pte_dma;
        u32 *pte;

        pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
        if (!pte || !*pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return PFN_PHYS(pfn);
}

static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc)
                return NULL;

        return mc->smmu;
}

static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev,
                                struct of_phandle_args *args)
{
        const struct iommu_ops *ops = smmu->iommu.ops;
        int err;

        err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops);
        if (err < 0) {
                dev_err(dev, "failed to initialize fwspec: %d\n", err);
                return err;
        }

        err = ops->of_xlate(dev, args);
        if (err < 0) {
                dev_err(dev, "failed to parse SW group ID: %d\n", err);
                iommu_fwspec_free(dev);
                return err;
        }

        return 0;
}

static int tegra_smmu_add_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = NULL;
        struct iommu_group *group;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
                                          index, &args) == 0) {
                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        err = tegra_smmu_configure(smmu, dev, &args);
                        of_node_put(args.np);

                        if (err < 0)
                                return err;

                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev->archdata.iommu = smmu;

                        iommu_device_link(&smmu->iommu, dev);

                        break;
                }

                of_node_put(args.np);
                index++;
        }

        if (!smmu)
                return -ENODEV;

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);

        return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;

        if (smmu)
                iommu_device_unlink(&smmu->iommu, dev);

        dev->archdata.iommu = NULL;
        iommu_group_remove_device(dev);
}

static const struct tegra_smmu_group_soc *
tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup)
{
        unsigned int i, j;

        for (i = 0; i < smmu->soc->num_groups; i++)
                for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
                        if (smmu->soc->groups[i].swgroups[j] == swgroup)
                                return &smmu->soc->groups[i];

        return NULL;
}

static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu,
                                                unsigned int swgroup)
{
        const struct tegra_smmu_group_soc *soc;
        struct tegra_smmu_group *group;

        soc = tegra_smmu_find_group(smmu, swgroup);
        if (!soc)
                return NULL;

        mutex_lock(&smmu->lock);

        list_for_each_entry(group, &smmu->groups, list)
                if (group->soc == soc) {
                        mutex_unlock(&smmu->lock);
                        return group->group;
                }

        group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
        if (!group) {
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        INIT_LIST_HEAD(&group->list);
        group->soc = soc;

        group->group = iommu_group_alloc();
        if (IS_ERR(group->group)) {
                devm_kfree(smmu->dev, group);
                mutex_unlock(&smmu->lock);
                return NULL;
        }

        list_add_tail(&group->list, &smmu->groups);
        mutex_unlock(&smmu->lock);

        return group->group;
}

static struct iommu_group *tegra_smmu_device_group(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct iommu_group *group;

        group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
        if (!group)
                group = generic_device_group(dev);

        return group;
}

static int tegra_smmu_of_xlate(struct device *dev,
                               struct of_phandle_args *args)
{
        u32 id = args->args[0];

        return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .add_device = tegra_smmu_add_device,
        .remove_device = tegra_smmu_remove_device,
        .device_group = tegra_smmu_device_group,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .iova_to_phys = tegra_smmu_iova_to_phys,
        .of_xlate = tegra_smmu_of_xlate,
        .pgsize_bitmap = SZ_4K,
};
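/*
 * With pgsize_bitmap set to SZ_4K the IOMMU core splits every map and
 * unmap request into individual 4 KiB calls, so the size argument seen
 * by tegra_smmu_map()/tegra_smmu_unmap() is always a single page.
 */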
static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}

static int tegra_smmu_swgroups_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "swgroup    enabled  ASID\n");
        seq_printf(s, "------------------------\n");

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
                const char *status;
                unsigned int asid;

                value = smmu_readl(smmu, group->reg);

                if (value & SMMU_ASID_ENABLE)
                        status = "yes";
                else
                        status = "no";

                asid = value & SMMU_ASID_MASK;

                seq_printf(s, "%-9s  %-7s  %#04x\n", group->name, status,
                           asid);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_swgroups);

static int tegra_smmu_clients_show(struct seq_file *s, void *data)
{
        struct tegra_smmu *smmu = s->private;
        unsigned int i;
        u32 value;

        seq_printf(s, "client       enabled\n");
        seq_printf(s, "--------------------\n");

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];
                const char *status;

                value = smmu_readl(smmu, client->smmu.reg);

                if (value & BIT(client->smmu.bit))
                        status = "yes";
                else
                        status = "no";

                seq_printf(s, "%-12s %s\n", client->name, status);
        }

        return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);

static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
{
        smmu->debugfs = debugfs_create_dir("smmu", NULL);
        if (!smmu->debugfs)
                return;

        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
        debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_clients_fops);
}

static void tegra_smmu_debugfs_exit(struct tegra_smmu *smmu)
{
        debugfs_remove_recursive(smmu->debugfs);
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        /* This can happen on Tegra20 which doesn't have an SMMU */
        if (!soc)
                return NULL;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .add_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&smmu->groups);
        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
        smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
                smmu->tlb_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc_all(smmu);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
        if (err)
                return ERR_PTR(err);

        iommu_device_set_ops(&smmu->iommu, &tegra_smmu_ops);
        iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

        err = iommu_device_register(&smmu->iommu);
        if (err) {
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0) {
                iommu_device_unregister(&smmu->iommu);
                iommu_device_sysfs_remove(&smmu->iommu);
                return ERR_PTR(err);
        }

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_init(smmu);

        return smmu;
}

void tegra_smmu_remove(struct tegra_smmu *smmu)
{
        iommu_device_unregister(&smmu->iommu);
        iommu_device_sysfs_remove(&smmu->iommu);

        if (IS_ENABLED(CONFIG_DEBUG_FS))
                tegra_smmu_debugfs_exit(smmu);
}
```
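For context, here is a minimal sketch (not part of the driver) of how a client driver of the same kernel era might exercise this SMMU through the generic IOMMU API. It assumes `dev` is a platform device whose DT node carries an `iommus` phandle to the memory controller, uses a hypothetical IOVA and helper name, and abbreviates error handling.

```c
#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Hypothetical helper: map one 4 KiB buffer at a fixed IOVA and verify it. */
static int example_map_buffer(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *domain;
        unsigned long iova = 0x80000000;        /* arbitrary example IOVA */
        int err;

        /* Ends up in tegra_smmu_domain_alloc(IOMMU_DOMAIN_UNMANAGED). */
        domain = iommu_domain_alloc(&platform_bus_type);
        if (!domain)
                return -ENOMEM;

        /* Programs the PTB registers and enables the device's swgroup. */
        err = iommu_attach_device(domain, dev);
        if (err < 0)
                goto free_domain;

        /* The core splits this into 4 KiB calls to tegra_smmu_map(). */
        err = iommu_map(domain, iova, paddr, SZ_4K,
                        IOMMU_READ | IOMMU_WRITE);
        if (err < 0)
                goto detach;

        /* Walks the page tables via tegra_smmu_iova_to_phys(). */
        WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

        iommu_unmap(domain, iova, SZ_4K);
detach:
        iommu_detach_device(domain, dev);
free_domain:
        iommu_domain_free(domain);
        return err;
}
```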