cregit-Linux how code gets into the kernel

Release 4.8 mm/memory_hotplug.c

/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains pointer to current page onlining function.
 * Initially it is generic_online_page(). If it is required it could be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() for generic callback restore.
 */

static void generic_online_page(struct page *page);


static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);
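For illustration, a minimal sketch of how a driver might divert onlined pages to itself instead of handing them to the buddy allocator, in the spirit of the Xen and Hyper-V balloon drivers; my_online_page() and my_driver_claim_page() are hypothetical names, and error handling is reduced to the essentials:

/* Sketch only: a ballooning driver taking over page onlining. */
static void my_online_page(struct page *page)
{
	__online_page_set_limits(page);	/* update zone/global max_pfn limits */
	/*
	 * Keep the page for the driver instead of freeing it to the buddy
	 * allocator (generic_online_page() would call __online_page_free()).
	 */
	my_driver_claim_page(page);
}

static int __init my_driver_init(void)
{
	/* fails if a non-default callback is already registered */
	return set_online_page_callback(&my_online_page);
}

static void __exit my_driver_exit(void)
{
	/* only restores generic_online_page() if our callback is still active */
	restore_online_page_callback(&my_online_page);
}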

/* The same as the cpu_hotplug lock, but for memory hotplug. */
static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing mem hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} mem_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "mem_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */

#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
#define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
#define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);


static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
vitaly kuznetsov    43      100.00%  1        100.00%
Total               43      100.00%  1        100.00%

__setup("memhp_default_state=", setup_memhp_default_state);
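Together with memhp_auto_online above, this wires up the memhp_default_state= kernel command-line option: booting with memhp_default_state=online makes newly added memory blocks come online automatically, while memhp_default_state=offline (the default unless CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE is set) leaves them offline until userspace onlines them, e.g. by writing "online" to /sys/devices/system/memory/memoryN/state.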
void get_online_mems(void)
{
	might_sleep();
	if (mem_hotplug.active_writer == current)
		return;
	memhp_lock_acquire_read();
	mutex_lock(&mem_hotplug.lock);
	mem_hotplug.refcount++;
	mutex_unlock(&mem_hotplug.lock);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
vladimir davydov    43      100.00%  1        100.00%
Total               43      100.00%  1        100.00%


void put_online_mems(void)
{
	if (mem_hotplug.active_writer == current)
		return;
	mutex_lock(&mem_hotplug.lock);

	if (WARN_ON(!mem_hotplug.refcount))
		mem_hotplug.refcount++; /* try to fix things up */

	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
		wake_up_process(mem_hotplug.active_writer);
	mutex_unlock(&mem_hotplug.lock);
	memhp_lock_release();
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
vladimir davydov    73      100.00%  1        100.00%
Total               73      100.00%  1        100.00%


void mem_hotplug_begin(void)
{
	mem_hotplug.active_writer = current;

	memhp_lock_acquire();
	for (;;) {
		mutex_lock(&mem_hotplug.lock);
		if (likely(!mem_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&mem_hotplug.lock);
		schedule();
	}
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
vladimir davydov    47      81.03%   1        50.00%
kosaki motohiro     11      18.97%   1        50.00%
Total               58      100.00%  2        100.00%


void mem_hotplug_done(void)
{
	mem_hotplug.active_writer = NULL;
	mutex_unlock(&mem_hotplug.lock);
	memhp_lock_release();
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
vladimir davydov    13      54.17%   1        50.00%
kosaki motohiro     11      45.83%   1        50.00%
Total               24      100.00%  2        100.00%
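The four primitives above form a refcounted reader/writer scheme. A minimal sketch of the usage pattern, assuming hypothetical callers memory_hotplug_reader() and memory_hotplug_writer() (real readers include the page-isolation and compaction paths; the writer side is the hotplug operations later in this file):

/* Reader side: keep hotplug out while we inspect hotpluggable state. */
static void memory_hotplug_reader(void)
{
	get_online_mems();	/* may sleep if mem_hotplug_begin() is in progress */
	/* ... walk zones/sections that a hotplug operation could change ... */
	put_online_mems();	/* last reader wakes a waiting writer */
}

/* Writer side: an actual memory hotplug operation. */
static void memory_hotplug_writer(void)
{
	mem_hotplug_begin();	/* sleeps until the reader refcount drops to zero */
	/* ... add or remove memory ... */
	mem_hotplug_done();
}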

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
keith mannthey      89      80.91%   1        16.67%
vitaly kuznetsov    16      14.55%   1        16.67%
yasunori goto       2       1.82%    1        16.67%
toshi kani          2       1.82%    2        33.33%
bjorn helgaas       1       0.91%    1        16.67%
Total               110     100.00%  6        100.00%


static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
keith mannthey      28      100.00%  1        100.00%
Total               28      100.00%  1        100.00%
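For context, a sketch of how these two helpers are paired by their caller (add_memory(), defined later in this file, follows essentially this shape, though the sketch below is not a verbatim copy): the ERR_PTR return is unpacked with PTR_ERR(), and the resource is released again if the rest of the hotplug operation fails.

/* Sketch of the caller side, assuming the shape of add_memory(). */
static int add_memory_sketch(int nid, u64 start, u64 size)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (IS_ERR(res))
		return PTR_ERR(res);	/* -ENOMEM or -EEXIST from above */

	ret = add_memory_resource(nid, res, memhp_auto_online);
	if (ret < 0)
		release_memory_resource(res);	/* undo the iomem registration */
	return ret;
}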

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info,  struct page *page,
		      unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
yasunori goto       36      75.00%   2        50.00%
andrea arcangeli    11      22.92%   1        25.00%
joonsoo kim         1       2.08%    1        25.00%
Total               48      100.00%  4        100.00%


void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
yasunori goto       52      71.23%   2        40.00%
andrea arcangeli    19      26.03%   1        20.00%
joonsoo kim         1       1.37%    1        20.00%
jiang liu           1       1.37%    1        20.00%
Total               73      100.00%  5        100.00%
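In short, get_page_bootmem() tags a page that backs boot-time metadata (a section's memmap, its usemap, or node data) with a type and a section/node number, stashing them in page->lru.next and page_private() and taking an extra reference; put_page_bootmem() drops that reference and, once the last user is gone, clears the tags and returns the page to the page allocator via free_reserved_page(). The registration helpers below walk those metadata areas page by page and call get_page_bootmem() on each.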

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
yasunori goto       163     99.39%   2        66.67%
adrian bunk         1       0.61%    1        33.33%
Total               164     100.00%  3        100.00%

#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
yasuaki ishimatsu   128     100.00%  1        100.00%
Total               128     100.00%  1        100.00%

#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone_is_initialized(zone)) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other nodes.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
yasunori goto       196     87.89%   1        14.29%
qiuxishi            17      7.62%    1        14.29%
xishi qiu           3       1.35%    1        14.29%
cody p schafer      3       1.35%    1        14.29%
tang chen           2       0.90%    1        14.29%
yang shi            1       0.45%    1        14.29%
linus torvalds      1       0.45%    1        14.29%
Total               223     100.00%  7        100.00%

#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone_end_pfn(zone);
	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
heiko carstens      66      88.00%   1        20.00%
xishi qiu           6       8.00%    2        40.00%
tang chen           2       2.67%    1        20.00%
fabian frederick    1       1.33%    1        20.00%
Total               75      100.00%  5        100.00%
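A short worked example of the span arithmetic (the section size is an assumption; on x86_64 a memory section is 128 MB, i.e. 32768 4 KiB pages): suppose a zone currently spans pfns [0x100000, 0x140000) and a 32768-page section is hot-added just below it, so start_pfn = 0xF8000 and end_pfn = 0x100000. Since start_pfn is below zone_start_pfn, zone_start_pfn becomes 0xF8000, and spanned_pages becomes max(0x140000, 0x100000) - 0xF8000 = 0x48000 pages. Adding a section above the zone instead leaves zone_start_pfn alone and only extends spanned_pages.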


static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * make it consist as free_area_init_core(),
		 * if spanned_pages = 0, then keep start_pfn = 0
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
lai jiangshan       67      100.00%  2        100.00%
Total               67      100.00%  2        100.00%


static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
lai jiangshan       68      100.00%  1        100.00%
Total               68      100.00%  1        100.00%

/*
 * Can fail with -ENOMEM from allocating a wait table with vmalloc() or
 * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic()
 */
static int __ref ensure_zone_is_initialized(struct zone *zone,
			unsigned long start_pfn, unsigned long num_pages)
{
	if (!zone_is_initialized(zone))
		return init_currently_empty_zone(zone, start_pfn, num_pages);

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
cody p schafer      41      100.00%  1        100.00%
Total               41      100.00%  1        100.00%


static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z1_start_pfn;

	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
	if (end_pfn > zone_end_pfn(z2))
		goto out_fail;
	/* the move out part must be at the left most of @z2 */
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* must included/overlap */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;

	/* use start_pfn for z1's start_pfn if z1 is empty */
	if (!zone_is_empty(z1))
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
	resize_zone(z2, end_pfn, zone_end_pfn(z2));

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
lai jiangshan       170     93.41%   2        33.33%
cody p schafer      7       3.85%    2        33.33%
xishi qiu           4       2.20%    1        16.67%
jiang liu           1       0.55%    1        16.67%
Total               182     100.00%  6        100.00%


static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z2_end_pfn;

	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the move out part mast at the right most of @z1 */
	if (zone_end_pfn(z1) > end_pfn)
		goto out_fail;
	/* must included/overlap */
	if (start_pfn >= zone_end_pfn(z1))
		goto out_fail;

	/* use end_pfn for z2's end_pfn if z2 is empty */
	if (!zone_is_empty(z2))
		z2_end_pfn = zone_end_pfn(z2);
	else
		z2_end_pfn = end_pfn;

	resize_zone(z1, z1->zone_start_pfn, start_pfn);
	resize_zone(z2, start_pfn, z2_end_pfn);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
lai jiangshan       169     92.35%   2        40.00%
cody p schafer      10      5.46%    2        40.00%
xishi qiu           4       2.19%    1        20.00%
Total               183     100.00%  5        100.00%


static struct zone * __meminit move_pfn_range(int zone_shift,
		unsigned long start_pfn, unsigned long end_pfn)
{
	struct zone *zone = page_zone(pfn_to_page(start_pfn));
	int ret = 0;

	if (zone_shift < 0)
		ret = move_pfn_range_left(zone + zone_shift, zone,
					  start_pfn, end_pfn);
	else if (zone_shift)
		ret = move_pfn_range_right(zone, zone + zone_shift,
					   start_pfn, end_pfn);

	if (ret)
		return NULL;

	return zone + zone_shift;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
reza arbab          91      100.00%  1        100.00%
Total               91      100.00%  1        100.00%
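Here zone_shift is the signed distance, in zone indices, between the range's current zone and the zone it should end up in: a negative shift moves the range left into a lower zone (for example from ZONE_MOVABLE down into ZONE_NORMAL), a positive shift moves it right into a higher zone, and a shift of zero leaves it in place. The caller, online_pages() later in this file (not shown in this excerpt), derives the shift from the requested online type before invoking move_pfn_range().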


static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
				      unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
heiko carstens      54      85.71%   1        25.00%
tang chen           5       7.94%    1        25.00%
xishi qiu           3       4.76%    1        25.00%
fabian frederick    1       1.59%    1        25.00%
Total               63      100.00%  4        100.00%


static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags, pfn;
	int ret;

	zone_type = zone - pgdat->node_zones;
	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
	if (ret)
		return ret;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);

	/* online_page_range is called later and expects pages reserved */
	for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
		if (!pfn_valid(pfn))
			continue;

		SetPageReserved(pfn_to_page(pfn));
	}
	return 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
dave hansen         59      34.71%   2        28.57%
heiko carstens      57      33.53%   1        14.29%
mel gorman          37      21.76%   1        14.29%
yasunori goto       12      7.06%    1        14.29%
cody p schafer      4       2.35%    1        14.29%
al viro             1       0.59%    1        14.29%
Total               170     100.00%  7        100.00%


static int __meminit __add_section(int nid, struct zone *zone,
					unsigned long phys_start_pfn)
{
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
dave hansen         52      65.00%   1        20.00%
yasunori goto       11      13.75%   1        20.00%
kamezawa hiroyuki   11      13.75%   1        20.00%
gary hade           5       6.25%    1        20.00%
al viro             1       1.25%    1        20.00%
Total               80      100.00%  5        100.00%

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	struct vmem_altmap *altmap;

	clear_zone_contiguous(zone);

	/* during initialize mem_map, align hot-added range to section */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	altmap = to_vmem_altmap((unsigned long) pfn_to_page(phys_start_pfn));
	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != phys_start_pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			err = -EINVAL;
			goto out;
		}
		altmap->alloc = 0;
	}

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, section_nr_to_pfn(i));

		/*
		 * EEXIST is finally dealt with by ioresource collision
		 * check. see add_memory() => register_memory_resource()
		 * Warning will be printed if there is collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}
	vmemmap_populate_print_last();
out:
	set_zone_contiguous(zone);
	return err;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
david rientjes      101     55.80%   1        20.00%
dan williams        56      30.94%   1        20.00%
joonsoo kim         18      9.94%    1        20.00%
sheng yong          3       1.66%    1        20.00%
zhu guihua          3       1.66%    1        20.00%
Total               181     100.00%  5        100.00%
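A hedged sketch of the arch side this comment refers to: an arch_add_memory() implementation picks the target zone and then hands the page range to __add_pages(). The helper below is illustrative only, not a copy of any particular architecture's code; the zone-selection policy and mapping setup differ per architecture.

/*
 * Illustrative only: roughly what an arch_add_memory() implementation
 * does before calling __add_pages().
 */
static int example_arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	/* default to ZONE_NORMAL; movable/device placement is policy, not shown */
	zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);

	/* the arch would set up its direct mapping for [start, start + size) here */

	return __add_pages(nid, zone, start_pfn, nr_pages);
}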

EXPORT_SYMBOL_GPL(__add_pages);

#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
yasuaki ishimatsu   93      100.00%  1        100.00%
Total               93      100.00%  1        100.00%

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
yasuaki ishimatsu   104     100.00%  1        100.00%
Total               104     100.00%  1        100.00%


static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is smallest section in the zone, it need
		 * shrink zone->zone_start_pfn and zone->zone_spanned_pages.
		 * In this case, we find second smallest valid mem_section
		 * for shrinking zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is biggest section in the zone, it need
		 * shrink zone->spanned_pages.
		 * In this case, we find second biggest valid mem_section for
		 * shrinking zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn