cregit-Linux: how code gets into the kernel

Release 4.15 kernel/power/snapshot.c

/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */


#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)

static bool hibernate_restore_protection;

static bool hibernate_restore_protection_active;


void enable_restore_image_protection(void) { hibernate_restore_protection = true; }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  11      100.00%  1        100.00%
Total              11      100.00%  1        100.00%


static inline void hibernate_restore_protection_begin(void) { hibernate_restore_protection_active = hibernate_restore_protection; }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  13      100.00%  1        100.00%
Total              13      100.00%  1        100.00%


static inline void hibernate_restore_protection_end(void) { hibernate_restore_protection_active = false; }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  13      100.00%  1        100.00%
Total              13      100.00%  1        100.00%


static inline void hibernate_restore_protect_page(void *page_address) { if (hibernate_restore_protection_active) set_memory_ro((unsigned long)page_address, 1); }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  26      100.00%  1        100.00%
Total              26      100.00%  1        100.00%


static inline void hibernate_restore_unprotect_page(void *page_address) { if (hibernate_restore_protection_active) set_memory_rw((unsigned long)page_address, 1); }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  26      100.00%  1        100.00%
Total              26      100.00%  1        100.00%
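For context, the helpers above are intended to bracket writes into the image kernel's page frames while an image is being loaded: a target page is made writable, filled, and flipped back to read-only so that stray writes fault. A minimal sketch of that pairing follows (the wrapper function, its name and its use of copy_page() are hypothetical illustrations, not this file's actual restore path):

/* Hypothetical illustration of the unprotect/protect pairing. */
static void restore_one_page_sketch(void *dst, const void *src)
{
	/* Allow writing to the destination page if protection is active. */
	hibernate_restore_unprotect_page(dst);
	copy_page(dst, (void *)src);
	/* Re-apply read-only protection so later stray writes will fault. */
	hibernate_restore_protect_page(dst);
}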

#else
static inline void hibernate_restore_protection_begin(void) {}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  8       100.00%  1        100.00%
Total              8       100.00%  1        100.00%


static inline void hibernate_restore_protection_end(void) {}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  8       100.00%  1        100.00%
Total              8       100.00%  1        100.00%


static inline void hibernate_restore_protect_page(void *page_address) {}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  10      100.00%  1        100.00%
Total              10      100.00%  1        100.00%


static inline void hibernate_restore_unprotect_page(void *page_address) {}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  10      100.00%  1        100.00%
Total              10      100.00%  1        100.00%

#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;
void __init hibernate_reserved_size_init(void) { reserved_size = SPARE_PAGES * PAGE_SIZE; }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  14      100.00%  1        100.00%
Total              14      100.00%  1        100.00%

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;
void __init hibernate_image_size_init(void) { image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  22      100.00%  2        100.00%
Total              22      100.00%  2        100.00%
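To make the default concrete with illustrative numbers: on a machine with 8 GiB of RAM and 4 KiB pages, totalram_pages is 2097152, so image_size defaults to ((2097152 * 2) / 5) * PAGE_SIZE = 3435970560 bytes, roughly 3.2 GiB, or two fifths of RAM.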

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (ie. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only).
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  94      100.00%  5        100.00%
Total              94      100.00%  5        100.00%


static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  48      100.00%  1        100.00%
Total              48      100.00%  1        100.00%


unsigned long get_safe_page(gfp_t gfp_mask) { return (unsigned long)__get_safe_page(gfp_mask); }

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  19      100.00%  3        100.00%
Total              19      100.00%  3        100.00%


static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  42      100.00%  3        100.00%
Total              42      100.00%  3        100.00%


static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  27      100.00%  1        100.00%
Total              27      100.00%  1        100.00%

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  50      92.59%   3        60.00%
Pavel Machek       3       5.56%    1        20.00%
David Shaohua Li   1       1.85%    1        20.00%
Total              54      100.00%  5        100.00%


static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  33      80.49%   1        33.33%
David Shaohua Li   7       17.07%   1        33.33%
Adrian Bunk        1       2.44%    1        33.33%
Total              41      100.00%  3        100.00%

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;			/* mask for allocating pages */
	int safe_needed;		/* if set, only "safe" pages are allocated */
};
static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  41      100.00%  2        100.00%
Total              41      100.00%  2        100.00%


static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  108     100.00%  3        100.00%
Total              108     100.00%  3        100.00%
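To illustrate how the chain allocator is driven, here is a minimal sketch under the rules the comment above describes (the caller is hypothetical; memory_bm_create() further down is the real user):

/* Hypothetical caller: carve 'count' small objects out of one chain. */
static int chain_alloc_sketch(gfp_t gfp_mask, unsigned int count)
{
	struct chain_allocator ca;
	unsigned int i;

	chain_init(&ca, gfp_mask, PG_ANY);
	for (i = 0; i < count; i++) {
		struct rtree_node *node = chain_alloc(&ca, sizeof(*node));

		if (!node)
			break;		/* out of memory; pages stay on ca.chain */
		/* ... initialize and use the object ... */
	}
	/* There is no per-object free; the whole chain goes at once. */
	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
	return 0;
}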

/**
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects.  The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone.  For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent each block of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits.  Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the PFNs that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes of the rtree
 * together for easy linear iteration over bits and easy freeing.
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;		/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Joerg Roedel       84      100.00%  1        100.00%
Total              84      100.00%  1        100.00%
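Before add_rtree_block(), a worked example of the bitmap geometry may help (illustrative numbers assuming 4 KiB pages and 64-bit longs, not values computed anywhere in this file):

/*
 * With PAGE_SIZE = 4096 and BITS_PER_LONG = 64:
 *   BM_BITS_PER_BLOCK    = 4096 * 8 = 32768 page frames covered per leaf page
 *   BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 child pointers per inner node
 *   BM_RTREE_LEVEL_SHIFT = 12 - 3   = 9
 *
 * A zone spanning 16 GiB (4194304 page frames) therefore needs
 * DIV_ROUND_UP(4194304, 32768) = 128 leaf blocks, and because 128 <= 512
 * a single level of inner nodes above the leaves is enough.
 */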

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Joerg Roedel       299     100.00%  1        100.00%
Total              299     100.00%  1        100.00%
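As a worked example of the insertion walk above (illustrative numbers only): when the 601st leaf is added, block_nr = zone->blocks = 600, so two levels are needed (600 >> 9 = 1, then 1 >> 9 = 0); the descent computes index 600 >> 9 = 1 at the top level and 600 & 511 = 88 at the bottom, so the new block ends up under entry 1 of the root node, in slot 88 of that inner node.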

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Joerg Roedel       149     100.00%  1        100.00%
Total              149     100.00%  1        100.00%

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Joerg Roedel       45      84.91%   1        33.33%
Rafael J. Wysocki  8       15.09%   2        66.67%
Total              53      100.00%  3        100.00%


static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Joerg Roedel       56      78.87%   1        33.33%
Rafael J. Wysocki  15      21.13%   2        66.67%
Total              71      100.00%  3        100.00%

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  44      100.00%  3        100.00%
Total              44      100.00%  3        100.00%

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  238     98.35%   2        50.00%
Xishi Qiu          3       1.24%    1        25.00%
Motohiro Kosaki    1       0.41%    1        25.00%
Total              242     100.00%  4        100.00%
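A small worked example of the merging (hypothetical zone layout): with populated zones spanning PFNs [0, 4096), [4096, 262144) and [1048576, 2097152), the first two ranges touch (zone_start 4096 <= ext->end 4096) and collapse into a single extent [0, 262144), while the third is disjoint and becomes a second extent, so memory_bm_create() below builds two radix trees.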

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  145     82.86%   2        50.00%
Joerg Roedel       30      17.14%   2        50.00%
Total              175     100.00%  4        100.00%

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  29      56.86%   2        66.67%
Joerg Roedel       22      43.14%   1        33.33%
Total              51      100.00%  3        100.00%

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf
	 * node for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Joerg Roedel       264     91.03%   2        33.33%
Rafael J. Wysocki  26      8.97%    4        66.67%
Total              290     100.00%  6        100.00%
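To see the final arithmetic with illustrative numbers: for a zone starting at PFN 0x100000 and a lookup of pfn = 0x10A234, the offset into the zone is 0xA234, so the leaf is block 0xA234 >> 15 = 1, cur.node_pfn becomes 0xA234 & ~0x7FFF = 0x8000, and *bit_nr = 0xA234 & 0x7FFF = 0x2234, i.e. bit 8756 within that leaf page.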


static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  53      100.00%  2        100.00%
Total              53      100.00%  2        100.00%


static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Rafael J. Wysocki  56      100.00%  1        100.00%
Total              56      100.00%  1        100.00%


static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) { void *addr; unsigned int bit; int error; error = memory_bm_find_bit(bm, pfn, &addr