Release 4.10 fs/xfs/kmem.c
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include "kmem.h"
#include "xfs_message.h"
/*
 * Greedy allocation.  Starts at @maxsize and halves the request size on
 * each failure until reaching @minsize, then keeps retrying at @minsize
 * until the allocation succeeds.  The loop below only terminates once
 * vzalloc() returns non-NULL, so this never returns NULL; the size
 * actually obtained is written back through @size.  Memory is vmalloced
 * and zeroed, so callers must release it with kvfree()/vfree().
 *
 * NOTE(review): if @minsize is 0 and vzalloc(0) keeps returning NULL,
 * this loops forever -- callers are expected to pass a sane non-zero
 * @minsize.  TODO confirm all call sites do so.
 */
void *
kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
{
	void *ptr;
	size_t kmsize = maxsize;

	while (!(ptr = vzalloc(kmsize))) {
		if ((kmsize >>= 1) <= minsize)
			kmsize = minsize;
	}

	/* Loop exit guarantees ptr != NULL; the old "if (ptr)" was dead. */
	*size = kmsize;
	return ptr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 64 | 98.46% | 1 | 50.00% |
| dave chinner | 1 | 1.54% | 1 | 50.00% |
| Total | 65 | 100.00% | 2 | 100.00% |
/*
 * Allocate @size bytes via kmalloc().  Unless KM_MAYFAIL or KM_NOSLEEP
 * is set, failed attempts are retried indefinitely, throttled by
 * congestion_wait(), with a deadlock warning logged every 100 tries.
 */
void *
kmem_alloc(size_t size, xfs_km_flags_t flags)
{
	gfp_t	lflags = kmem_flags_convert(flags);
	int	attempts = 0;
	void	*ptr;

	for (;;) {
		ptr = kmalloc(size, lflags);
		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
			break;
		if (!(++attempts % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
				current->comm, current->pid,
				(unsigned int)size, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	}
	return ptr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nathan scott | 84 | 78.50% | 2 | 20.00% |
| tetsuo handa | 8 | 7.48% | 1 | 10.00% |
| eric sandeen | 7 | 6.54% | 1 | 10.00% |
| dave chinner | 3 | 2.80% | 1 | 10.00% |
| al viro | 2 | 1.87% | 2 | 20.00% |
| andrew morton | 1 | 0.93% | 1 | 10.00% |
| harvey harrison | 1 | 0.93% | 1 | 10.00% |
| jens axboe | 1 | 0.93% | 1 | 10.00% |
| Total | 107 | 100.00% | 10 | 100.00% |
/*
 * Zeroed allocation of @size bytes: try a (may-fail) kmem_zalloc() first
 * and fall back to __vmalloc() when the slab allocation fails.
 */
void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	unsigned noio_flag = 0;
	bool	nofs_context;
	gfp_t	lflags;
	void	*ptr;

	ptr = kmem_zalloc(size, flags | KM_MAYFAIL);
	if (ptr)
		return ptr;

	/*
	 * __vmalloc() allocates its data pages and auxiliary structures
	 * (e.g. pagetables) with GFP_KERNEL, yet we may be running in a
	 * GFP_NOFS context here.  Tell memory reclaim via PF_MEMALLOC_NOIO
	 * that re-entering the filesystem is not allowed, to avoid a
	 * potential reclaim deadlock.
	 */
	nofs_context = (current->flags & PF_FSTRANS) || (flags & KM_NOFS);
	if (nofs_context)
		noio_flag = memalloc_noio_save();

	lflags = kmem_flags_convert(flags);
	ptr = __vmalloc(size, lflags | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);

	if (nofs_context)
		memalloc_noio_restore(noio_flag);
	return ptr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| dave chinner | 108 | 98.18% | 2 | 66.67% |
| nathan scott | 2 | 1.82% | 1 | 33.33% |
| Total | 110 | 100.00% | 3 | 100.00% |
/*
 * Reallocate @old to @newsize bytes via krealloc().  As with kmem_alloc(),
 * failures are retried forever unless KM_MAYFAIL or KM_NOSLEEP is set,
 * throttled by congestion_wait() and warning every 100 attempts.
 */
void *
kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
{
	gfp_t	lflags = kmem_flags_convert(flags);
	int	attempts = 0;
	void	*ptr;

	for (;;) {
		ptr = krealloc(old, newsize, lflags);
		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
			break;
		if (!(++attempts % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
				current->comm, current->pid,
				newsize, __func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	}
	return ptr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| christoph hellwig | 68 | 61.82% | 1 | 20.00% |
| nathan scott | 39 | 35.45% | 1 | 20.00% |
| al viro | 1 | 0.91% | 1 | 20.00% |
| barry naujok | 1 | 0.91% | 1 | 20.00% |
| dave chinner | 1 | 0.91% | 1 | 20.00% |
| Total | 110 | 100.00% | 5 | 100.00% |
/*
 * Allocate an object from slab cache @zone.  Mirrors the kmem_alloc()
 * retry policy: loop forever unless KM_MAYFAIL or KM_NOSLEEP is set,
 * throttled by congestion_wait(), warning every 100 attempts.
 */
void *
kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	gfp_t	lflags = kmem_flags_convert(flags);
	int	attempts = 0;
	void	*ptr;

	for (;;) {
		ptr = kmem_cache_alloc(zone, lflags);
		if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
			break;
		if (!(++attempts % 100))
			xfs_err(NULL,
	"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
				current->comm, current->pid,
				__func__, lflags);
		congestion_wait(BLK_RW_ASYNC, HZ/50);
	}
	return ptr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nathan scott | 85 | 83.33% | 2 | 22.22% |
| tetsuo handa | 9 | 8.82% | 1 | 11.11% |
| dave chinner | 3 | 2.94% | 1 | 11.11% |
| al viro | 2 | 1.96% | 2 | 22.22% |
| jens axboe | 1 | 0.98% | 1 | 11.11% |
| andrew morton | 1 | 0.98% | 1 | 11.11% |
| harvey harrison | 1 | 0.98% | 1 | 11.11% |
| Total | 102 | 100.00% | 9 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| nathan scott | 226 | 43.46% | 4 | 22.22% |
| christoph hellwig | 132 | 25.38% | 2 | 11.11% |
| dave chinner | 120 | 23.08% | 3 | 16.67% |
| tetsuo handa | 17 | 3.27% | 1 | 5.56% |
| eric sandeen | 7 | 1.35% | 1 | 5.56% |
| al viro | 5 | 0.96% | 2 | 11.11% |
| andrew morton | 5 | 0.96% | 1 | 5.56% |
| tejun heo | 3 | 0.58% | 1 | 5.56% |
| jens axboe | 2 | 0.38% | 1 | 5.56% |
| harvey harrison | 2 | 0.38% | 1 | 5.56% |
| barry naujok | 1 | 0.19% | 1 | 5.56% |
| Total | 520 | 100.00% | 18 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.