Release 4.15 fs/xfs/kmem.h
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
/*
* General memory allocation interfaces
*/
typedef unsigned __bitwise xfs_km_flags_t;
#define KM_SLEEP ((__force xfs_km_flags_t)0x0001u)
#define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS ((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO ((__force xfs_km_flags_t)0x0010u)
/*
* We use a special process flag to avoid recursive callbacks into
* the filesystem during transactions. We will also issue our own
* warnings, so we explicitly skip any generic ones (silly of us).
*/
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t lflags;

	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (flags & KM_NOFS)
			lflags &= ~__GFP_FS;
	}

	/*
	 * Default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
	 * as it is feasible but rather fail than retry forever for all
	 * request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Nathan Scott | 24 | 26.97% | 3 | 30.00% |
| Christoph Hellwig | 23 | 25.84% | 2 | 20.00% |
| Stephen Lord | 16 | 17.98% | 1 | 10.00% |
| Gu Zheng | 12 | 13.48% | 1 | 10.00% |
| Michal Hocko | 11 | 12.36% | 1 | 10.00% |
| Al Viro | 3 | 3.37% | 2 | 20.00% |
| Total | 89 | 100.00% | 10 | 100.00% |
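As a worked example of the conversion above: kmem_flags_convert() is a pure function, so each KM_* combination maps to a fixed gfp_t value. The sketch below is a hypothetical illustration, not part of kmem.h; the commented results follow directly from the branches of the function.

/*
 * Hypothetical illustration, not part of the kernel source: trace how
 * the KM_* flags map onto gfp_t values in kmem_flags_convert().
 */
static inline void kmem_flags_example(void)
{
	gfp_t g;

	/* KM_SLEEP: ordinary sleepable allocation, generic OOM warnings off */
	g = kmem_flags_convert(KM_SLEEP);
	/* g == GFP_KERNEL | __GFP_NOWARN */

	/* KM_NOFS | KM_MAYFAIL: no filesystem recursion, allocation may fail */
	g = kmem_flags_convert(KM_NOFS | KM_MAYFAIL);
	/* g == ((GFP_KERNEL | __GFP_NOWARN) & ~__GFP_FS) | __GFP_RETRY_MAYFAIL */

	/* KM_NOSLEEP: atomic context, blocking is not allowed */
	g = kmem_flags_convert(KM_NOSLEEP);
	/* g == GFP_ATOMIC | __GFP_NOWARN */

	(void)g;	/* silence unused-variable warnings in this sketch */
}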
extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Yalin Wang | 10 | 58.82% | 1 | 20.00% |
| Nathan Scott | 5 | 29.41% | 2 | 40.00% |
| Christoph Hellwig | 1 | 5.88% | 1 | 20.00% |
| Barry Naujok | 1 | 5.88% | 1 | 20.00% |
| Total | 17 | 100.00% | 5 | 100.00% |
static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Gu Zheng | 24 | 100.00% | 1 | 100.00% |
| Total | 24 | 100.00% | 1 | 100.00% |
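Taken together, a typical caller allocates with kmem_zalloc() and releases with kmem_free(); because kmem_free() is built on kvfree(), it is safe for both kmalloc- and vmalloc-backed memory returned by the helpers above. A minimal sketch, assuming a made-up struct xfs_foo (the type and function names are invented for illustration):

/* Hypothetical usage sketch; struct xfs_foo is invented for illustration. */
struct xfs_foo {
	int	count;
	char	name[32];
};

static int xfs_foo_example(void)
{
	struct xfs_foo	*foo;

	/*
	 * Zeroed allocation; KM_MAYFAIL permits a NULL return instead of
	 * retrying indefinitely, so the caller must check for failure.
	 */
	foo = kmem_zalloc(sizeof(*foo), KM_SLEEP | KM_MAYFAIL);
	if (!foo)
		return -ENOMEM;

	foo->count = 1;
	kmem_free(foo);		/* kvfree() handles either allocation path */
	return 0;
}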
/*
* Zone interfaces
*/
#define KM_ZONE_HWALIGN SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT SLAB_ACCOUNT
#define kmem_zone kmem_cache
#define kmem_zone_t struct kmem_cache
static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Christoph Hellwig | 18 | 62.07% | 2 | 50.00% |
| Nathan Scott | 11 | 37.93% | 2 | 50.00% |
| Total | 29 | 100.00% | 4 | 100.00% |
static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Nathan Scott | 38 | 90.48% | 1 | 25.00% |
| Christoph Lameter | 2 | 4.76% | 1 | 25.00% |
| Christoph Hellwig | 1 | 2.38% | 1 | 25.00% |
| Alexey Dobriyan | 1 | 2.38% | 1 | 25.00% |
| Total | 42 | 100.00% | 4 | 100.00% |
static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Christoph Hellwig | 12 | 54.55% | 2 | 50.00% |
| Nathan Scott | 10 | 45.45% | 2 | 50.00% |
| Total | 22 | 100.00% | 4 | 100.00% |
static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Christoph Hellwig | 10 | 62.50% | 2 | 50.00% |
| Nathan Scott | 6 | 37.50% | 2 | 50.00% |
| Total | 16 | 100.00% | 4 | 100.00% |
extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}
Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Gu Zheng | 16 | 64.00% | 1 | 16.67% |
| Christoph Hellwig | 5 | 20.00% | 2 | 33.33% |
| Nathan Scott | 3 | 12.00% | 2 | 33.33% |
| Al Viro | 1 | 4.00% | 1 | 16.67% |
| Total | 25 | 100.00% | 6 | 100.00% |
#endif /* __XFS_SUPPORT_KMEM_H__ */
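Putting the zone interfaces together: a typical lifecycle creates the cache once at mount or module-init time, allocates and frees objects from it during operation, and destroys it at teardown. A minimal sketch, assuming an invented object type and zone name:

/* Hypothetical lifecycle sketch; struct xfs_bar and the zone name are invented. */
struct xfs_bar {
	unsigned long	flags;
};

static kmem_zone_t *xfs_bar_zone;

static int xfs_bar_zone_setup(void)
{
	xfs_bar_zone = kmem_zone_init(sizeof(struct xfs_bar), "xfs_bar");
	if (!xfs_bar_zone)
		return -ENOMEM;
	return 0;
}

static void xfs_bar_zone_use(void)
{
	struct xfs_bar	*bar;

	/* Zeroed object from the cache; KM_SLEEP blocks until memory is found */
	bar = kmem_zone_zalloc(xfs_bar_zone, KM_SLEEP);

	bar->flags = 0;
	kmem_zone_free(xfs_bar_zone, bar);
}

static void xfs_bar_zone_teardown(void)
{
	kmem_zone_destroy(xfs_bar_zone);
}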
Overall Contributors
| Person | Tokens | Token Share | Commits | Commit Share |
|---|---|---|---|---|
| Nathan Scott | 157 | 40.89% | 6 | 28.57% |
| Christoph Hellwig | 98 | 25.52% | 3 | 14.29% |
| Gu Zheng | 56 | 14.58% | 1 | 4.76% |
| Al Viro | 16 | 4.17% | 2 | 9.52% |
| Stephen Lord | 16 | 4.17% | 1 | 4.76% |
| Michal Hocko | 11 | 2.86% | 1 | 4.76% |
| Dave Chinner | 11 | 2.86% | 1 | 4.76% |
| Yalin Wang | 10 | 2.60% | 1 | 4.76% |
| Vladimir Davydov | 4 | 1.04% | 1 | 4.76% |
| Christoph Lameter | 2 | 0.52% | 1 | 4.76% |
| Alexey Dobriyan | 1 | 0.26% | 1 | 4.76% |
| Paul Jackson | 1 | 0.26% | 1 | 4.76% |
| Barry Naujok | 1 | 0.26% | 1 | 4.76% |
| Total | 384 | 100.00% | 21 | 100.00% |