/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/uaccess.h>
#include <linux/export.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
/*
 * copy_from_user_nmi - copy user memory while in NMI (or any) context.
 * @to:   kernel destination buffer
 * @from: user-space source address
 * @n:    number of bytes to copy
 *
 * Returns the number of bytes that could NOT be copied: 0 on full
 * success, @n when the range is invalid or the copy faulted entirely.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long nleft;

	/* Reject ranges that fall outside the user address space. */
	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ
	 * context, disable pagefaults so that its behaviour stays
	 * consistent when called from other contexts as well.
	 */
	pagefault_disable();
	nleft = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return nleft;
}
Person | Tokens | Prop | Commits | CommitProp |
Robert Richter | 36 | 61.02% | 1 | 25.00% |
Arun Sharma | 12 | 20.34% | 1 | 25.00% |
Peter Zijlstra | 10 | 16.95% | 1 | 25.00% |
Yann Droneaud | 1 | 1.69% | 1 | 25.00% |
Total | 59 | 100.00% | 4 | 100.00% |
Person | Tokens | Prop | Commits | CommitProp |
Robert Richter | 46 | 63.89% | 1 | 16.67% |
Arun Sharma | 12 | 16.67% | 1 | 16.67% |
Peter Zijlstra | 11 | 15.28% | 1 | 16.67% |
Paul Gortmaker | 1 | 1.39% | 1 | 16.67% |
Al Viro | 1 | 1.39% | 1 | 16.67% |
Yann Droneaud | 1 | 1.39% | 1 | 16.67% |
Total | 72 | 100.00% | 6 | 100.00% |