cregit-Linux: how code gets into the kernel

Release 4.11 fs/hfsplus/bitmap.c

Directory: fs/hfsplus
/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)
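
A quick aside on the arithmetic: each page of the allocation file covers PAGE_SIZE * 8 block bits, so on a machine with 4 KiB pages one page tracks 32768 blocks packed into 1024 big-endian u32 words. Below is a minimal user-space sketch of that bookkeeping, using a hypothetical PAGE_SIZE stand-in since the real constant comes from the kernel headers:

#include <stdio.h>

/* Hypothetical stand-in for the kernel's PAGE_SIZE on a 4 KiB-page system. */
#define PAGE_SIZE	4096UL
#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)

int main(void)
{
	unsigned long offset = 100000;	/* an arbitrary block number */

	printf("bits per page:  %lu\n", PAGE_CACHE_BITS);		/* 32768 */
	printf("words per page: %lu\n", PAGE_CACHE_BITS / 32);		/* 1024 */
	/*
	 * offset / PAGE_CACHE_BITS selects the allocation-file page,
	 * (offset & (PAGE_CACHE_BITS - 1)) / 32 the u32 within that page.
	 */
	printf("page index:     %lu\n", offset / PAGE_CACHE_BITS);	/* 3 */
	printf("word index:     %lu\n",
	       (offset & (PAGE_CACHE_BITS - 1)) / 32);			/* 53 */
	return 0;
}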


int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1 << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;

found:
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}
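
The scan above walks the bitmap most-significant-bit first: each on-disk u32 is big-endian, bit 31 of the CPU-order value corresponds to the lowest-numbered block within that word, and (1U << 31) >> i starts the search at bit position i. Here is a self-contained user-space sketch of the inner loop, assuming the word has already been converted to CPU order (find_zero_bit is a hypothetical name used only for this sketch):

#include <stdint.h>
#include <stdio.h>

/*
 * Return the first zero bit of 'n' at or after position 'i', counting from
 * the most significant bit (the word's lowest-numbered block) downward,
 * or 32 if every remaining bit is set, mirroring hfsplus_block_allocate.
 */
static int find_zero_bit(uint32_t n, int i)
{
	uint32_t mask = (1U << 31) >> i;

	for (; i < 32; mask >>= 1, i++) {
		if (!(n & mask))
			return i;
	}
	return 32;
}

int main(void)
{
	/* 0xf8000000: positions 0-4 are allocated, position 5 is the first free bit. */
	printf("%d\n", find_zero_bit(0xf8000000U, 0));	/* prints 5 */
	/* Starting at position 8 skips the free bits at positions 5-7. */
	printf("%d\n", find_zero_bit(0xf8ffff00U, 8));	/* prints 24 */
	return 0;
}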

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Andrew Morton                    681   86.53%        1      11.11%
Eric Sesterhenn / Snakebyte       48    6.10%        1      11.11%
Al Viro                           22    2.80%        1      11.11%
Christoph Hellwig                 20    2.54%        2      22.22%
Joe Perches                        8    1.02%        1      11.11%
Artem B. Bityutskiy                3    0.38%        1      11.11%
Pekka J Enberg                     3    0.38%        1      11.11%
Jes Sorensen                       2    0.25%        1      11.11%
Total                            787  100.00%        9     100.00%


int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);
	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);
	return -EIO;
}
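
When the freed range starts or ends in the middle of a u32, hfsplus_block_free clears only the bits it covers: 0xffffffffU << (32 - i) keeps the i bits ahead of the range, and 0xffffffffU >> (i + count) keeps the bits behind it. Below is a small user-space sketch of that masking for the case where the range starts off a word boundary and fits within the word, assuming a CPU-order uint32_t (clear_bit_range is a hypothetical name, not a kernel helper):

#include <stdint.h>
#include <stdio.h>

/*
 * Clear 'count' bits of 'word' starting at position 'i', with positions
 * numbered from the most significant bit down, mirroring the partial-word
 * case in hfsplus_block_free. Assumes 0 < i and i + count < 32, as in the
 * kernel code path that builds these masks.
 */
static uint32_t clear_bit_range(uint32_t word, int i, int count)
{
	uint32_t mask = 0xffffffffU << (32 - i);	/* keep bits before the range */

	mask |= 0xffffffffU >> (i + count);		/* keep bits after the range */
	return word & mask;
}

int main(void)
{
	/* Free the blocks at positions 4-11 of a fully allocated word. */
	printf("%08x\n", clear_bit_range(0xffffffffU, 4, 8));	/* prints f00fffff */
	return 0;
}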

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Andrew Morton                    335   81.31%        1       9.09%
Alan Cox                          43   10.44%        1       9.09%
Christoph Hellwig                 22    5.34%        2      18.18%
Joe Perches                        3    0.73%        2      18.18%
Artem B. Bityutskiy                3    0.73%        1       9.09%
Pekka J Enberg                     2    0.49%        1       9.09%
Jes Sorensen                       2    0.49%        1       9.09%
Vyacheslav Dubeyko                 1    0.24%        1       9.09%
Al Viro                            1    0.24%        1       9.09%
Total                            412  100.00%       11     100.00%


Overall Contributors

Person                        Tokens     Prop  Commits  CommitProp
Andrew Morton                   1029   84.83%        1       7.69%
Eric Sesterhenn / Snakebyte       48    3.96%        1       7.69%
Alan Cox                          43    3.54%        1       7.69%
Christoph Hellwig                 42    3.46%        2      15.38%
Al Viro                           23    1.90%        1       7.69%
Joe Perches                       11    0.91%        2      15.38%
Artem B. Bityutskiy                6    0.49%        1       7.69%
Pekka J Enberg                     5    0.41%        1       7.69%
Jes Sorensen                       4    0.33%        1       7.69%
Vyacheslav Dubeyko                 1    0.08%        1       7.69%
Kirill A. Shutemov                 1    0.08%        1       7.69%
Total                           1213  100.00%       13     100.00%
Created with cregit.