Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Dan J Williams | 2063 | 80.62% | 28 | 60.87% |
Dave Jiang | 447 | 17.47% | 11 | 23.91% |
Oliver O'Halloran | 20 | 0.78% | 1 | 2.17% |
Jeff Layton | 11 | 0.43% | 1 | 2.17% |
Souptick Joarder | 5 | 0.20% | 1 | 2.17% |
Jeff Moyer | 4 | 0.16% | 1 | 2.17% |
Pushkar Jambhlekar | 4 | 0.16% | 1 | 2.17% |
Jan Kara | 3 | 0.12% | 1 | 2.17% |
Logan Gunthorpe | 2 | 0.08% | 1 | 2.17% |
Total | 2559 | | 46 | |
```c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "bus.h"

static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
{
	return container_of(ref, struct dev_dax, ref);
}

static void dev_dax_percpu_release(struct percpu_ref *ref)
{
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	complete(&dev_dax->cmp);
}

static void dev_dax_percpu_exit(struct percpu_ref *ref)
{
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	wait_for_completion(&dev_dax->cmp);
	percpu_ref_exit(ref);
}

static void dev_dax_percpu_kill(struct percpu_ref *data)
{
	struct percpu_ref *ref = data;
	struct dev_dax *dev_dax = ref_to_dev_dax(ref);

	dev_dbg(&dev_dax->dev, "%s\n", __func__);
	percpu_ref_kill(ref);
}

static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
		const char *func)
{
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	unsigned long mask;

	if (!dax_alive(dev_dax->dax_dev))
		return -ENXIO;

	/* prevent private mappings from being established */
	if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, attempted private mapping\n",
				current->comm, func);
		return -EINVAL;
	}

	mask = dax_region->align - 1;
	if (vma->vm_start & mask || vma->vm_end & mask) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
				current->comm, func, vma->vm_start,
				vma->vm_end, mask);
		return -EINVAL;
	}

	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
			&& (vma->vm_flags & VM_DONTCOPY) == 0) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, dax range requires MADV_DONTFORK\n",
				current->comm, func);
		return -EINVAL;
	}

	if (!vma_is_dax(vma)) {
		dev_info_ratelimited(dev,
				"%s: %s: fail, vma is not DAX capable\n",
				current->comm, func);
		return -EINVAL;
	}

	return 0;
}

/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
		unsigned long size)
{
	struct resource *res = &dev_dax->region->res;
	phys_addr_t phys;

	phys = pgoff * PAGE_SIZE + res->start;
	if (phys >= res->start && phys <= res->end) {
		if (phys + size - 1 <= res->end)
			return phys;
	}

	return -1;
}

static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	unsigned int fault_size = PAGE_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PAGE_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	if (fault_size != dax_region->align)
		return VM_FAULT_SIGBUS;

	phys = dax_pgoff_to_phys(dev_dax, vmf->pgoff, PAGE_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", vmf->pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
}

static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PMD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PMD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pmd mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pmd_addr < vmf->vma->vm_start ||
			(pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pmd_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PMD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	unsigned long pud_addr = vmf->address & PUD_MASK;
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	phys_addr_t phys;
	pgoff_t pgoff;
	unsigned int fault_size = PUD_SIZE;

	if (check_vma(dev_dax, vmf->vma, __func__))
		return VM_FAULT_SIGBUS;

	dax_region = dev_dax->region;
	if (dax_region->align > PUD_SIZE) {
		dev_dbg(dev, "alignment (%#x) > fault size (%#x)\n",
			dax_region->align, fault_size);
		return VM_FAULT_SIGBUS;
	}

	/* dax pud mappings require pfn_t_devmap() */
	if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) {
		dev_dbg(dev, "region lacks devmap flags\n");
		return VM_FAULT_SIGBUS;
	}

	if (fault_size < dax_region->align)
		return VM_FAULT_SIGBUS;
	else if (fault_size > dax_region->align)
		return VM_FAULT_FALLBACK;

	/* if we are outside of the VMA */
	if (pud_addr < vmf->vma->vm_start ||
			(pud_addr + PUD_SIZE) > vmf->vma->vm_end)
		return VM_FAULT_SIGBUS;

	pgoff = linear_page_index(vmf->vma, pud_addr);
	phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);
	if (phys == -1) {
		dev_dbg(dev, "pgoff_to_phys(%#lx) failed\n", pgoff);
		return VM_FAULT_SIGBUS;
	}

	*pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);

	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
				struct vm_fault *vmf, pfn_t *pfn)
{
	return VM_FAULT_FALLBACK;
}
#endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */

static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	struct file *filp = vmf->vma->vm_file;
	unsigned long fault_size;
	vm_fault_t rc = VM_FAULT_SIGBUS;
	int id;
	pfn_t pfn;
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n",
			current->comm,
			(vmf->flags & FAULT_FLAG_WRITE) ? "write" : "read",
			vmf->vma->vm_start, vmf->vma->vm_end, pe_size);

	id = dax_read_lock();
	switch (pe_size) {
	case PE_SIZE_PTE:
		fault_size = PAGE_SIZE;
		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PMD:
		fault_size = PMD_SIZE;
		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
		break;
	case PE_SIZE_PUD:
		fault_size = PUD_SIZE;
		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
		break;
	default:
		rc = VM_FAULT_SIGBUS;
	}

	if (rc == VM_FAULT_NOPAGE) {
		unsigned long i;
		pgoff_t pgoff;

		/*
		 * In the device-dax case the only possibility for a
		 * VM_FAULT_NOPAGE result is when device-dax capacity is
		 * mapped. No need to consider the zero page, or racing
		 * conflicting mappings.
		 */
		pgoff = linear_page_index(vmf->vma,
				vmf->address & ~(fault_size - 1));
		for (i = 0; i < fault_size / PAGE_SIZE; i++) {
			struct page *page;

			page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
			if (page->mapping)
				continue;
			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}
	dax_read_unlock(id);

	return rc;
}

static vm_fault_t dev_dax_fault(struct vm_fault *vmf)
{
	return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	if (!IS_ALIGNED(addr, dax_region->align))
		return -EINVAL;
	return 0;
}

static unsigned long dev_dax_pagesize(struct vm_area_struct *vma)
{
	struct file *filp = vma->vm_file;
	struct dev_dax *dev_dax = filp->private_data;
	struct dax_region *dax_region = dev_dax->region;

	return dax_region->align;
}

static const struct vm_operations_struct dax_vm_ops = {
	.fault = dev_dax_fault,
	.huge_fault = dev_dax_huge_fault,
	.split = dev_dax_split,
	.pagesize = dev_dax_pagesize,
};

static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct dev_dax *dev_dax = filp->private_data;
	int rc, id;

	dev_dbg(&dev_dax->dev, "trace\n");

	/*
	 * We lock to check dax_dev liveness and will re-check at
	 * fault time.
	 */
	id = dax_read_lock();
	rc = check_vma(dev_dax, vma, __func__);
	dax_read_unlock(id);
	if (rc)
		return rc;

	vma->vm_ops = &dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}

/* return an unmapped area aligned to the dax region specified alignment */
static unsigned long dax_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	unsigned long off, off_end, off_align, len_align, addr_align, align;
	struct dev_dax *dev_dax = filp ? filp->private_data : NULL;
	struct dax_region *dax_region;

	if (!dev_dax || addr)
		goto out;

	dax_region = dev_dax->region;
	align = dax_region->align;
	off = pgoff << PAGE_SHIFT;
	off_end = off + len;
	off_align = round_up(off, align);

	if ((off_end <= off_align) || ((off_end - off_align) < align))
		goto out;

	len_align = len + align;
	if ((off + len_align) < off)
		goto out;

	addr_align = current->mm->get_unmapped_area(filp, addr, len_align,
			pgoff, flags);
	if (!IS_ERR_VALUE(addr_align)) {
		addr_align += (off - addr_align) & (align - 1);
		return addr_align;
	}
 out:
	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}

static const struct address_space_operations dev_dax_aops = {
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
};

static int dax_open(struct inode *inode, struct file *filp)
{
	struct dax_device *dax_dev = inode_dax(inode);
	struct inode *__dax_inode = dax_inode(dax_dev);
	struct dev_dax *dev_dax = dax_get_private(dax_dev);

	dev_dbg(&dev_dax->dev, "trace\n");
	inode->i_mapping = __dax_inode->i_mapping;
	inode->i_mapping->host = __dax_inode;
	inode->i_mapping->a_ops = &dev_dax_aops;
	filp->f_mapping = inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
	filp->private_data = dev_dax;
	inode->i_flags = S_DAX;

	return 0;
}

static int dax_release(struct inode *inode, struct file *filp)
{
	struct dev_dax *dev_dax = filp->private_data;

	dev_dbg(&dev_dax->dev, "trace\n");
	return 0;
}

static const struct file_operations dax_fops = {
	.llseek = noop_llseek,
	.owner = THIS_MODULE,
	.open = dax_open,
	.release = dax_release,
	.get_unmapped_area = dax_get_unmapped_area,
	.mmap = dax_mmap,
	.mmap_supported_flags = MAP_SYNC,
};

static void dev_dax_cdev_del(void *cdev)
{
	cdev_del(cdev);
}

static void dev_dax_kill(void *dev_dax)
{
	kill_dev_dax(dev_dax);
}

int dev_dax_probe(struct device *dev)
{
	struct dev_dax *dev_dax = to_dev_dax(dev);
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct resource *res = &dev_dax->region->res;
	struct inode *inode;
	struct cdev *cdev;
	void *addr;
	int rc;

	/* 1:1 map region resource range to device-dax instance range */
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	init_completion(&dev_dax->cmp);
	rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
			GFP_KERNEL);
	if (rc)
		return rc;

	dev_dax->pgmap.ref = &dev_dax->ref;
	dev_dax->pgmap.kill = dev_dax_percpu_kill;
	dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	inode = dax_inode(dax_dev);
	cdev = inode->i_cdev;
	cdev_init(cdev, &dax_fops);
	if (dev->class) {
		/* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
		cdev->owner = dev->parent->driver->owner;
	} else
		cdev->owner = dev->driver->owner;
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
	if (rc)
		return rc;

	run_dax(dax_dev);
	return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
}
EXPORT_SYMBOL_GPL(dev_dax_probe);

static int dev_dax_remove(struct device *dev)
{
	/* all probe actions are unwound by devm */
	return 0;
}

static struct dax_device_driver device_dax_driver = {
	.drv = {
		.probe = dev_dax_probe,
		.remove = dev_dax_remove,
	},
	.match_always = 1,
};

static int __init dax_init(void)
{
	return dax_driver_register(&device_dax_driver);
}

static void __exit dax_exit(void)
{
	dax_driver_unregister(&device_dax_driver);
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_init);
module_exit(dax_exit);
MODULE_ALIAS_DAX_DEVICE(0);
```