
Release 4.10 fs/jfs/jfs_dmap.c

/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Tino Reichardt, 2012
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_lock.h"
#include "jfs_metapage.h"
#include "jfs_debug.h"
#include "jfs_discard.h"

/*
 *      SERIALIZATION of the Block Allocation Map.
 *
 *      the working state of the block allocation map is accessed in
 *      two directions:
 *
 *      1) allocation and free requests that start at the dmap
 *         level and move up through the dmap control pages (i.e.
 *         the vast majority of requests).
 *
 *      2) allocation requests that start at dmap control page
 *         level and work down towards the dmaps.
 *
 *      the serialization scheme used here is as follows.
 *
 *      requests which start at the bottom are serialized against each
 *      other through buffers and each request holds onto its buffers
 *      as it works its way up from a single dmap to the required level
 *      of dmap control page.
 *      requests that start at the top are serialized against each other
 *      and against requests that start from the bottom by the multiple
 *      read/single write inode lock of the bmap inode. requests starting
 *      at the top take this lock in write mode while requests starting
 *      at the bottom take the lock in read mode.  a single top-down
 *      request may proceed exclusively while multiple bottom-up requests
 *      may proceed simultaneously (under the protection of busy buffers).
 *
 *      in addition to information found in dmaps and dmap control pages,
 *      the working state of the block allocation map also includes read/
 *      write information maintained in the bmap descriptor (i.e. total
 *      free block count, allocation group level free block counts).
 *      a single exclusive lock (BMAP_LOCK) is used to guard this
 *      information in the face of multiple bottom-up requests.
 *      (lock ordering: IREAD_LOCK, BMAP_LOCK)
 *
 *      accesses to the persistent state of the block allocation map
 *      (limited to the persistent bitmaps in dmaps) are guarded by
 *      (busy) buffers.
 */
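
To make the serialization scheme above concrete, here is a minimal userspace sketch of the same discipline, using POSIX primitives in place of the kernel's inode rwlock and BMAP_LOCK mutex. All names and the free-count update are illustrative, not JFS code. Note the ordering: the read/write lock is always taken before the mutex, matching the (IREAD_LOCK, BMAP_LOCK) ordering noted above.

#include <pthread.h>

static pthread_rwlock_t bmap_rwlock = PTHREAD_RWLOCK_INITIALIZER; /* bmap inode lock */
static pthread_mutex_t bmap_lock = PTHREAD_MUTEX_INITIALIZER;     /* BMAP_LOCK */
static long total_free;                                           /* e.g. db_nfree */

/* bottom-up request: many may run concurrently (read side), but each
 * one serializes on bmap_lock before touching the shared summary counts.
 */
static void bottom_up_request(long delta)
{
	pthread_rwlock_rdlock(&bmap_rwlock);	/* IREAD_LOCK */
	/* ... work up from a single dmap, holding its buffers ... */
	pthread_mutex_lock(&bmap_lock);		/* BMAP_LOCK, taken second */
	total_free += delta;
	pthread_mutex_unlock(&bmap_lock);
	pthread_rwlock_unlock(&bmap_rwlock);
}

/* top-down request: proceeds exclusively against all other requests. */
static void top_down_request(void)
{
	pthread_rwlock_wrlock(&bmap_rwlock);	/* IWRITE_LOCK */
	/* ... work down from a dmap control page towards the dmaps ... */
	pthread_rwlock_unlock(&bmap_rwlock);
}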


#define BMAP_LOCK_INIT(bmp)	mutex_init(&bmp->db_bmaplock)

#define BMAP_LOCK(bmp)		mutex_lock(&bmp->db_bmaplock)

#define BMAP_UNLOCK(bmp)	mutex_unlock(&bmp->db_bmaplock)

/*
 * forward references
 */
static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
			int nblocks);
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
static int dbBackSplit(dmtree_t * tp, int leafno);
static int dbJoin(dmtree_t * tp, int leafno, int newval);
static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
		    int level);
static int dbAllocAny(struct bmap * bmp, s64 nblocks, int l2nb, s64 * results);
static int dbAllocNext(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks);
static int dbAllocNear(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks,
		       int l2nb, s64 * results);
static int dbAllocDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		       int nblocks);
static int dbAllocDmapLev(struct bmap * bmp, struct dmap * dp, int nblocks,
			  int l2nb,
			  s64 * results);
static int dbAllocAG(struct bmap * bmp, int agno, s64 nblocks, int l2nb,
		     s64 * results);
static int dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno,
		      s64 * results);
static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
		      int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
		      int nblocks);
static int dbMaxBud(u8 * cp);
static int blkstol2(s64 nb);

static int cntlz(u32 value);
static int cnttz(u32 word);

static int dbAllocDmapBU(struct bmap * bmp, struct dmap * dp, s64 blkno,
			 int nblocks);
static int dbInitDmap(struct dmap * dp, s64 blkno, int nblocks);
static int dbInitDmapTree(struct dmap * dp);
static int dbInitTree(struct dmaptree * dtp);
static int dbInitDmapCtl(struct dmapctl * dcp, int level, int i);
static int dbGetL2AGSize(s64 nblocks);

/*
 *      buddy table
 *
 * table used for determining buddy sizes within characters of
 * dmap bitmap words.  the characters themselves serve as indexes
 * into the table, with the table elements yielding the maximum
 * binary buddy of free bits within the character.
 */

static const s8 budtab[256] = {
	3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0,
	2, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, -1
};
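
A few spot checks make the encoding concrete. With the table above in scope, and a 0 bit meaning free, each byte value of a dmap bitmap indexes budtab directly and yields the log2 size of the largest aligned free binary buddy in that byte (this check routine is illustrative, not kernel code):

#include <assert.h>

static void budtab_examples(void)
{
	assert(budtab[0x00] == 3);	/* all 8 bits free   -> buddy of 8 = 2^3 */
	assert(budtab[0x0f] == 2);	/* high half free    -> buddy of 4 = 2^2 */
	assert(budtab[0xfc] == 1);	/* low two bits free -> buddy of 2 = 2^1 */
	assert(budtab[0xfe] == 0);	/* single free bit   -> buddy of 1 = 2^0 */
	assert(budtab[0xff] == -1);	/* no free bits at all */
}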

/*
 * NAME:        dbMount()
 *
 * FUNCTION:    initialize the block allocation map.
 *
 *              memory is allocated for the in-core bmap descriptor and
 *              the in-core descriptor is initialized from disk.
 *
 * PARAMETERS:
 *      ipbmap  - pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *      0       - success
 *      -ENOMEM - insufficient memory
 *      -EIO    - i/o error
 */

int dbMount(struct inode *ipbmap)
{
	struct bmap *bmp;
	struct dbmap_disk *dbmp_le;
	struct metapage *mp;
	int i;

	/*
	 * allocate/initialize the in-memory bmap descriptor
	 */
	/* allocate memory for the in-memory bmap descriptor */
	bmp = kmalloc(sizeof(struct bmap), GFP_KERNEL);
	if (bmp == NULL)
		return -ENOMEM;

	/* read the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		kfree(bmp);
		return -EIO;
	}

	/* copy the on-disk bmap descriptor to its in-memory version. */
	dbmp_le = (struct dbmap_disk *) mp->data;
	bmp->db_mapsize = le64_to_cpu(dbmp_le->dn_mapsize);
	bmp->db_nfree = le64_to_cpu(dbmp_le->dn_nfree);
	bmp->db_l2nbperpage = le32_to_cpu(dbmp_le->dn_l2nbperpage);
	bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
	bmp->db_maxlevel = le32_to_cpu(dbmp_le->dn_maxlevel);
	bmp->db_maxag = le32_to_cpu(dbmp_le->dn_maxag);
	bmp->db_agpref = le32_to_cpu(dbmp_le->dn_agpref);
	bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel);
	bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight);
	bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth);
	bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart);
	bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size);
	for (i = 0; i < MAXAG; i++)
		bmp->db_agfree[i] = le64_to_cpu(dbmp_le->dn_agfree[i]);
	bmp->db_agsize = le64_to_cpu(dbmp_le->dn_agsize);
	bmp->db_maxfreebud = dbmp_le->dn_maxfreebud;

	/* release the buffer. */
	release_metapage(mp);

	/* bind the bmap inode and the bmap descriptor to each other. */
	bmp->db_ipbmap = ipbmap;
	JFS_SBI(ipbmap->i_sb)->bmap = bmp;

	memset(bmp->db_active, 0, sizeof(bmp->db_active));

	/*
	 * allocate/initialize the bmap lock
	 */
	BMAP_LOCK_INIT(bmp);

	return (0);
}

Contributors

Person           Tokens   Prop      Commits   CommitProp
dave kleikamp    335      99.41%    5         83.33%
daniel mack      2        0.59%     1         16.67%
Total            337      100.00%   6         100.00%
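
The field-by-field le64_to_cpu()/le32_to_cpu() copy in dbMount() is the standard on-disk-to-in-core pattern: the descriptor is stored little-endian, so every multi-byte field is byte-swapped on big-endian hosts at load time. A minimal userspace sketch of the same idea, with illustrative struct and function names (disk_desc, mem_desc, load_desc) and glibc's le64toh()/le32toh() standing in for the kernel helpers:

#include <stdint.h>
#include <endian.h>

struct disk_desc {			/* on-disk layout, always little-endian */
	uint64_t dn_mapsize;
	uint32_t dn_numag;
} __attribute__((packed));

struct mem_desc {			/* in-memory copy, host byte order */
	uint64_t db_mapsize;
	uint32_t db_numag;
};

static void load_desc(struct mem_desc *m, const struct disk_desc *d)
{
	m->db_mapsize = le64toh(d->dn_mapsize);	/* kernel: le64_to_cpu() */
	m->db_numag   = le32toh(d->dn_numag);	/* kernel: le32_to_cpu() */
}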

/*
 * NAME:        dbUnmount()
 *
 * FUNCTION:    terminate the block allocation map in preparation for
 *              file system unmount.
 *
 *              the in-core bmap descriptor is written to disk and
 *              the memory for this descriptor is freed.
 *
 * PARAMETERS:
 *      ipbmap  - pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *      0       - success
 *      -EIO    - i/o error
 */
int dbUnmount(struct inode *ipbmap, int mounterror)
{
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	if (!(mounterror || isReadOnly(ipbmap)))
		dbSync(ipbmap);

	/*
	 * Invalidate the page cache buffers
	 */
	truncate_inode_pages(ipbmap->i_mapping, 0);

	/* free the memory for the in-memory bmap. */
	kfree(bmp);

	return (0);
}

Contributors

Person           Tokens   Prop      Commits   CommitProp
dave kleikamp    65       100.00%   3         100.00%
Total            65       100.00%   3         100.00%

/*
 *      dbSync()
 */
int dbSync(struct inode *ipbmap)
{
	struct dbmap_disk *dbmp_le;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	struct metapage *mp;
	int i;

	/*
	 * write bmap global control page
	 */
	/* get the buffer for the on-disk bmap descriptor. */
	mp = read_metapage(ipbmap,
			   BMAPBLKNO << JFS_SBI(ipbmap->i_sb)->l2nbperpage,
			   PSIZE, 0);
	if (mp == NULL) {
		jfs_err("dbSync: read_metapage failed!");
		return -EIO;
	}
	/* copy the in-memory version of the bmap to the on-disk version */
	dbmp_le = (struct dbmap_disk *) mp->data;
	dbmp_le->dn_mapsize = cpu_to_le64(bmp->db_mapsize);
	dbmp_le->dn_nfree = cpu_to_le64(bmp->db_nfree);
	dbmp_le->dn_l2nbperpage = cpu_to_le32(bmp->db_l2nbperpage);
	dbmp_le->dn_numag = cpu_to_le32(bmp->db_numag);
	dbmp_le->dn_maxlevel = cpu_to_le32(bmp->db_maxlevel);
	dbmp_le->dn_maxag = cpu_to_le32(bmp->db_maxag);
	dbmp_le->dn_agpref = cpu_to_le32(bmp->db_agpref);
	dbmp_le->dn_aglevel = cpu_to_le32(bmp->db_aglevel);
	dbmp_le->dn_agheight = cpu_to_le32(bmp->db_agheight);
	dbmp_le->dn_agwidth = cpu_to_le32(bmp->db_agwidth);
	dbmp_le->dn_agstart = cpu_to_le32(bmp->db_agstart);
	dbmp_le->dn_agl2size = cpu_to_le32(bmp->db_agl2size);
	for (i = 0; i < MAXAG; i++)
		dbmp_le->dn_agfree[i] = cpu_to_le64(bmp->db_agfree[i]);
	dbmp_le->dn_agsize = cpu_to_le64(bmp->db_agsize);
	dbmp_le->dn_maxfreebud = bmp->db_maxfreebud;

	/* write the buffer */
	write_metapage(mp);

	/*
	 * write out dirty pages of bmap
	 */
	filemap_write_and_wait(ipbmap->i_mapping);

	diWriteSpecial(ipbmap, 0);

	return (0);
}

Contributors

Person           Tokens   Prop      Commits   CommitProp
dave kleikamp    290      97.64%    6         66.67%
andrew morton    4        1.35%     1         11.11%
daniel mack      2        0.67%     1         11.11%
hirofumi ogawa   1        0.34%     1         11.11%
Total            297      100.00%   9         100.00%

/*
 * NAME:        dbFree()
 *
 * FUNCTION:    free the specified block range from the working block
 *              allocation map.
 *
 *              the blocks will be freed from the working map one dmap
 *              at a time.
 *
 * PARAMETERS:
 *      ip      - pointer to in-core inode;
 *      blkno   - starting block number to be freed.
 *      nblocks - number of blocks to be freed.
 *
 * RETURN VALUES:
 *      0       - success
 *      -EIO    - i/o error
 */
int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
{
	struct metapage *mp;
	struct dmap *dp;
	int nb, rc;
	s64 lblkno, rem;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp = JFS_SBI(ip->i_sb)->bmap;
	struct super_block *sb = ipbmap->i_sb;

	IREAD_LOCK(ipbmap, RDWRLOCK_DMAP);

	/* block to be freed better be within the mapsize. */
	if (unlikely((blkno == 0) || (blkno + nblocks > bmp->db_mapsize))) {
		IREAD_UNLOCK(ipbmap);
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ip->i_sb, "block to be freed is outside the map\n");
		return -EIO;
	}

	/**
	 * TRIM the blocks, when mounted with discard option
	 */
	if (JFS_SBI(sb)->flag & JFS_DISCARD)
		if (JFS_SBI(sb)->minblks_trim <= nblocks)
			jfs_issue_discard(ipbmap, blkno, nblocks);

	/*
	 * free the blocks a dmap at a time.
	 */
	mp = NULL;
	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
		/* release previous dmap if any */
		if (mp) {
			write_metapage(mp);
		}

		/* get the buffer for the current dmap. */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		mp = read_metapage(ipbmap, lblkno, PSIZE, 0);
		if (mp == NULL) {
			IREAD_UNLOCK(ipbmap);
			return -EIO;
		}
		dp = (struct dmap *) mp->data;

		/* determine the number of blocks to be freed from
		 * this dmap.
		 */
		nb = min(rem, BPERDMAP - (blkno & (BPERDMAP - 1)));

		/* free the blocks. */
		if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
			jfs_error(ip->i_sb, "error in block map\n");
			release_metapage(mp);
			IREAD_UNLOCK(ipbmap);
			return (rc);
		}
	}

	/* write the last buffer. */
	write_metapage(mp);

	IREAD_UNLOCK(ipbmap);

	return (0);
}

Contributors

Person           Tokens   Prop      Commits   CommitProp
dave kleikamp    301      87.50%    7         77.78%
tino reichardt   41       11.92%    1         11.11%
joe perches      2        0.58%     1         11.11%
Total            344      100.00%   9         100.00%
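
The chunking arithmetic in dbFree()'s loop relies on BPERDMAP being a power of two: blkno & (BPERDMAP - 1) is the offset within the current dmap, so BPERDMAP minus that offset is the distance to the next dmap boundary. A standalone sketch of the walk (BPERDMAP_DEMO is illustrative; the real constant comes from jfs_dmap.h):

#include <stdio.h>
#include <stdint.h>

#define BPERDMAP_DEMO 8192	/* blocks covered by one dmap (assumed value) */

static void walk_range(int64_t blkno, int64_t nblocks)
{
	int64_t rem, nb, tonext;

	for (rem = nblocks; rem > 0; rem -= nb, blkno += nb) {
		/* never cross a dmap boundary in a single step */
		tonext = BPERDMAP_DEMO - (blkno & (BPERDMAP_DEMO - 1));
		nb = rem < tonext ? rem : tonext;
		printf("free %lld blocks at %lld (dmap %lld)\n",
		       (long long) nb, (long long) blkno,
		       (long long) (blkno / BPERDMAP_DEMO));
	}
}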

/*
 * NAME:        dbUpdatePMap()
 *
 * FUNCTION:    update the allocation state (free or allocate) of the
 *              specified block range in the persistent block allocation
 *              map.
 *
 *              the blocks will be updated in the persistent map one
 *              dmap at a time.
 *
 * PARAMETERS:
 *      ipbmap  - pointer to in-core inode for the block map.
 *      free    - 'true' if block range is to be freed from the persistent
 *                map; 'false' if it is to be allocated.
 *      blkno   - starting block number of the range.
 *      nblocks - number of contiguous blocks in the range.
 *      tblk    - transaction block;
 *
 * RETURN VALUES:
 *      0       - success
 *      -EIO    - i/o error
 */
int dbUpdatePMap(struct inode *ipbmap,
		 int free, s64 blkno, s64 nblocks, struct tblock * tblk)
{
	int nblks, dbitno, wbitno, rbits;
	int word, nbits, nwords;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;
	s64 lblkno, rem, lastlblkno;
	u32 mask;
	struct dmap *dp;
	struct metapage *mp;
	struct jfs_log *log;
	int lsn, difft, diffp;
	unsigned long flags;

	/* the blocks better be within the mapsize. */
	if (blkno + nblocks > bmp->db_mapsize) {
		printk(KERN_ERR "blkno = %Lx, nblocks = %Lx\n",
		       (unsigned long long) blkno,
		       (unsigned long long) nblocks);
		jfs_error(ipbmap->i_sb, "blocks are outside the map\n");
		return -EIO;
	}

	/* compute delta of transaction lsn from log syncpt */
	lsn = tblk->lsn;
	log = (struct jfs_log *) JFS_SBI(tblk->sb)->log;
	logdiff(difft, lsn, log);

	/*
	 * update the block state a dmap at a time.
	 */
	mp = NULL;
	lastlblkno = 0;
	for (rem = nblocks; rem > 0; rem -= nblks, blkno += nblks) {
		/* get the buffer for the current dmap. */
		lblkno = BLKTODMAP(blkno, bmp->db_l2nbperpage);
		if (lblkno != lastlblkno) {
			if (mp) {
				write_metapage(mp);
			}

			mp = read_metapage(bmp->db_ipbmap, lblkno, PSIZE, 0);
			if (mp == NULL)
				return -EIO;
			metapage_wait_for_io(mp);
		}
		dp = (struct dmap *) mp->data;

		/* determine the bit number and word within the dmap of
		 * the starting block.  also determine how many blocks
		 * are to be updated within this dmap.
		 */
		dbitno = blkno & (BPERDMAP - 1);
		word = dbitno >> L2DBWORD;
		nblks = min(rem, (s64)BPERDMAP - dbitno);

		/* update the bits of the dmap words. the first and last
		 * words may only have a subset of their bits updated. if
		 * this is the case, we'll work against that word (i.e.
		 * partial first and/or last) only in a single pass.  a
		 * single pass will also be used to update all words that
		 * are to have all their bits updated.
		 */
		for (rbits = nblks; rbits > 0;
		     rbits -= nbits, dbitno += nbits) {
			/* determine the bit number within the word and
			 * the number of bits within the word.
			 */
			wbitno = dbitno & (DBWORD - 1);
			nbits = min(rbits, DBWORD - wbitno);

			/* check if only part of the word is to be updated. */
			if (nbits < DBWORD) {
				/* update (free or allocate) the bits
				 * in this word.
				 */
				mask = (ONES << (DBWORD - nbits) >> wbitno);
				if (free)
					dp->pmap[word] &= cpu_to_le32(~mask);
				else
					dp->pmap[word] |= cpu_to_le32(mask);

				word += 1;
			} else {
				/* one or more words are to have all
				 * their bits updated.  determine how
				 * many words and how many bits.
				 */
				nwords = rbits >> L2DBWORD;
				nbits = nwords << L2DBWORD;

				/* update (free or allocate) the bits
				 * in these words.
				 */
				if (free)
					memset(&dp->pmap[word], 0,
					       nwords * 4);
				else
					memset(&dp->pmap[word], (int) ONES,
					       nwords * 4);

				word += nwords;
			}
		}

		/*
		 * update dmap lsn
		 */
		if (lblkno == lastlblkno)
			continue;

		lastlblkno = lblkno;

		LOGSYNC_LOCK(log, flags);
		if (mp->lsn != 0) {
			/* inherit older/smaller lsn */
			logdiff(diffp, mp->lsn, log);
			if (difft < diffp) {
				mp->lsn = lsn;

				/* move bp after tblock in logsync list */
				list_move(&mp->synclist, &tblk->synclist);
			}

			/* inherit younger/larger clsn */
			logdiff(difft, tblk->clsn, log);
			logdiff(diffp, mp->clsn, log);
			if (difft > diffp)
				mp->clsn = tblk->clsn;
		} else {
			mp->log = log;
			mp->lsn = lsn;

			/* insert bp after tblock in logsync list */
			log->count++;
			list_add(&mp->synclist, &tblk->synclist);

			mp->clsn = tblk->clsn;
		}
		LOGSYNC_UNLOCK(log, flags);
	}

	/* write the last buffer. */
	if (mp) {
		write_metapage(mp);
	}

	return (0);
}

Contributors

Person           Tokens   Prop      Commits   CommitProp
dave kleikamp    635      99.84%    8         88.89%
joe perches      1        0.16%     1         11.11%
Total            636      100.00%   9         100.00%
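
The partial-word mask in dbUpdatePMap() is built from two shifts: ONES << (DBWORD - nbits) leaves nbits one-bits at the top of a 32-bit word, and the following >> wbitno slides them down to the starting bit offset. A standalone demo, with the two constants duplicated here for illustration:

#include <stdio.h>
#include <stdint.h>

#define DBWORD_DEMO 32		/* bits per dmap bitmap word */
#define ONES_DEMO   0xffffffffu

static uint32_t dmap_mask(int wbitno, int nbits)
{
	return (ONES_DEMO << (DBWORD_DEMO - nbits)) >> wbitno;
}

int main(void)
{
	/* 4 bits starting at bit offset 8, counting from the MSB */
	printf("%08x\n", dmap_mask(8, 4));	/* prints 00f00000 */
	return 0;
}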

/*
 * NAME:        dbNextAG()
 *
 * FUNCTION:    find the preferred allocation group for new allocations.
 *
 *              Within the allocation groups, we maintain a preferred
 *              allocation group which consists of a group with at least
 *              average free space.  It is the preferred group that we
 *              target new inode allocation towards.  The tie-in between
 *              inode allocation and block allocation occurs as we
 *              allocate the first (data) block of an inode and specify
 *              the inode (block) as the allocation hint for this block.
 *
 *              We try to avoid having more than one open file growing in
 *              an allocation group, as this will lead to fragmentation.
 *              This differs from the old OS/2 method of trying to keep
 *              empty ags around for large allocations.
 *
 * PARAMETERS:
 *      ipbmap  - pointer to in-core inode for the block map.
 *
 * RETURN VALUES:
 *      the preferred allocation group number.
 */
int dbNextAG(struct inode *ipbmap)
{
	s64 avgfree;
	int agpref;
	s64 hwm = 0;
	int i;
	int next_best = -1;
	struct bmap *bmp = JFS_SBI(ipbmap->i_sb)->bmap;

	BMAP_LOCK(bmp);

	/* determine the average number of free blocks within the ags. */
	avgfree = (u32)bmp->db_nfree / bmp->db_numag;

	/*
	 * if the current preferred ag does not have an active allocator
	 * and has at least average freespace, return it
	 */
	agpref = bmp->db_agpref;
	if ((atomic_read(&bmp->db_active[agpref]) == 0) &&
	    (bmp->db_agfree[agpref] >= avgfree))
		goto unlock;

	/* From the last preferred ag, find the next one with at least
	 * average free space.
	 */
	for (i = 0 ; i < bmp->db_numag; i++, agpref++) {
		if (agpref == bmp->db_numag)
			agpref = 0;

		if (atomic_read(&bmp->db_active[agpref]))
			/* open file is currently growing in this ag */
			continue;
		if (bmp->db_agfree[agpref] >= avgfree) {
			/* Return this one */
			bmp->db_agpref = agpref;
			goto unlock;
		} else if (bmp->db_agfree[agpref] > hwm) {
			/* Less than avg. freespace, but best so far */
			hwm = bmp->db_agfree[agpref];
			next_best = agpref;
		}
	}

	/*
	 * If no inactive ag was found with average freespace, use the
	 * next best
	 */
	if (next_best != -1)
		bmp->db_agpref = next_best;
	/* else leave db_agpref unchanged */
 unlock:
	BMAP_UNLOCK(bmp);

	/* return the preferred group. */
	return (bmp->db_agpref);
}

Contributors

Person           Tokens   Prop      Commits   CommitProp
dave kleikamp    230      100.00%   4         100.00%
Total            230      100.00%   4         100.00%
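
The selection policy of dbNextAG() can be modeled in a few lines of plain C. The sketch below mirrors the logic above (compute the average, prefer the current AG if idle and at least average-free, otherwise scan forward with wrap-around and remember the best under-average idle AG), but uses illustrative types, a fixed demo capacity, and no locking:

#include <stdint.h>

struct bmap_model {
	int	numag;		/* number of allocation groups */
	int	agpref;		/* current preferred AG */
	int64_t	nfree;		/* total free blocks */
	int64_t	agfree[64];	/* per-AG free blocks (demo cap; JFS uses MAXAG) */
	int	active[64];	/* nonzero if a file is growing in this AG */
};

static int next_ag(struct bmap_model *b)
{
	int64_t avgfree = b->nfree / b->numag, hwm = 0;
	int agpref = b->agpref, next_best = -1, i;

	if (!b->active[agpref] && b->agfree[agpref] >= avgfree)
		return b->agpref;		/* current preference still good */

	for (i = 0; i < b->numag; i++, agpref++) {
		if (agpref == b->numag)
			agpref = 0;		/* wrap around */
		if (b->active[agpref])
			continue;		/* a file is growing here */
		if (b->agfree[agpref] >= avgfree)
			return b->agpref = agpref;
		if (b->agfree[agpref] > hwm) {	/* best under-average so far */
			hwm = b->agfree[agpref];
			next_best = agpref;
		}
	}
	if (next_best != -1)
		b->agpref = next_best;
	return b->agpref;
}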

/*
 * NAME:        dbAlloc()
 *
 * FUNCTION:    attempt to allocate a specified number of contiguous free
 *              blocks from the working allocation block map.
 *
 *              the block allocation policy uses hints and a multi-step
 *              approach.
 *
 *              for allocation requests smaller than the number of blocks
 *              per dmap, we first try to allocate the new blocks
 *              immediately following the hint.  if these blocks are not
 *              available, we try to allocate blocks near the hint.  if
 *              no blocks near the hint are available, we next try to
 *              allocate within the same dmap as contains the hint.
 *
 *              if no blocks are available in the dmap or the allocation
 *              request is larger than the dmap size, we try to allocate
 *              within the same allocation group as contains the hint.  if
 *              this does not succeed, we finally try to allocate anywhere
 *              within the aggregate.
 *
 *              we also try to allocate anywhere within the aggregate for
 *              allocation requests larger than the allocation group size
 *              or requests that specify no hint value.
 *
 * PARAMETERS:
 *      ip      - pointer to in-core inode;
 *      hint    - allocation hint.
 *      nblocks - number of contiguous blocks in the range.
 *      results - on successful return, set to the starting block number
 *                of the newly allocated contiguous range.
 *
 * RETURN VALUES:
 *      0       - success
 *      -ENOSPC - insufficient disk resources
 *      -EIO    - i/o error
 */
int dbAlloc(struct inode *ip, s64 hint, s64 nblocks, s64 * results)
{
	int rc, agno;
	struct inode *ipbmap = JFS_SBI(ip->i_sb)->ipbmap;
	struct bmap *bmp;
	struct metapage *mp;
	s64 lblkno, blkno;
	struct dmap *dp;
	int l2nb;
	s64 mapSize;
	int writers;

	/* assert that nblocks is valid */
	assert(nblocks > 0);

	/* get the log2 number of blocks to be allocated.
	 * if the number of blocks is not a log2 multiple,
	 * it will be rounded up to the next log2 multiple.
	 */
	l2nb = BLKSTOL2(nblocks);

	bmp = JFS_SBI(ip->i_sb)->bmap;

	mapSize = bmp->db_mapsize;

	/* the hint should be within the map */
	if (hint >= mapSize) {
		jfs_error(ip->i_sb, "the hint is outside the map\n");
		return -EIO;
	}

	/* if the number of blocks to be allocated is greater than the
	 * allocation group size, try to allocate anywhere.
	 */
	if (l2nb > bmp->db_agl2size) {
		IWRITE_LOCK(ipbmap, RDWRLOCK_DMAP);

		rc = dbAllocAny(bmp, nblocks, l2nb, results);
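
dbAlloc() first rounds the request size up to a power of two via BLKSTOL2(). A standalone sketch of that rounding (illustrative, not the kernel's blkstol2() implementation):

static int blkstol2_demo(long long nb)
{
	int l2nb = 0;

	while ((1LL << l2nb) < nb)	/* round up to the next power of two */
		l2nb++;
	return l2nb;
}

/* blkstol2_demo(1) == 0, blkstol2_demo(8) == 3, blkstol2_demo(9) == 4 */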