Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Karolina Drobnik | 4919 | 60.83% | 10 | 40.00% |
Rebecca Mckeever | 1889 | 23.36% | 8 | 32.00% |
Wei Yang | 844 | 10.44% | 4 | 16.00% |
Shaoqin Huang | 431 | 5.33% | 2 | 8.00% |
Mike Rapoport | 3 | 0.04% | 1 | 4.00% |
Total | 8086 | 100.00% | 25 | 100.00% |
```c
// SPDX-License-Identifier: GPL-2.0-or-later
#include "basic_api.h"
#include <string.h>
#include <linux/memblock.h>

#define EXPECTED_MEMBLOCK_REGIONS 128
#define FUNC_ADD     "memblock_add"
#define FUNC_RESERVE "memblock_reserve"
#define FUNC_REMOVE  "memblock_remove"
#define FUNC_FREE    "memblock_free"
#define FUNC_TRIM    "memblock_trim_memory"

static int memblock_initialization_check(void)
{
	PREFIX_PUSH();

	ASSERT_NE(memblock.memory.regions, NULL);
	ASSERT_EQ(memblock.memory.cnt, 0);
	ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
	ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);

	ASSERT_NE(memblock.reserved.regions, NULL);
	ASSERT_EQ(memblock.reserved.cnt, 0);
	ASSERT_EQ(memblock.reserved.max, EXPECTED_MEMBLOCK_REGIONS);
	ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);

	ASSERT_EQ(memblock.bottom_up, false);
	ASSERT_EQ(memblock.current_limit, MEMBLOCK_ALLOC_ANYWHERE);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that adds a memory block of a specified base address
 * and size to the collection of available memory regions (memblock.memory).
 * Expect to create a new entry. The region counter and total memory get
 * updated.
 */
static int memblock_add_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = SZ_1G,
		.size = SZ_4M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that adds a memory block of a specified base address, size,
 * NUMA node and memory flags to the collection of available memory regions.
 * Expect to create a new entry. The region counter and total memory get
 * updated.
 */
static int memblock_add_node_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = SZ_1M,
		.size = SZ_16M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add_node(r.base, r.size, 1, MEMBLOCK_HOTPLUG);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);
#ifdef CONFIG_NUMA
	ASSERT_EQ(rgn->nid, 1);
#endif
	ASSERT_EQ(rgn->flags, MEMBLOCK_HOTPLUG);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}
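/*
 * Illustrative sketch (hypothetical helper, not used by the tests): the
 * overlap tests below all expect two overlapping regions to merge into a
 * single region that runs from the lower base to the end of the
 * later-ending region, i.e. (upper.base - lower.base) + upper.size.
 */
static inline phys_addr_t merged_size(struct region lower, struct region upper)
{
	/* assumes the upper region ends last, as in the overlap tests below */
	return (upper.base - lower.base) + upper.size;
}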
/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another:
 *
 *  |        +--------+        +--------+  |
 *  |        |   r1   |        |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to add two correctly initialized entries to the collection of
 * available memory regions (memblock.memory). The total size and
 * region counter fields get updated.
 */
static int memblock_add_disjoint_check(void)
{
	struct memblock_region *rgn1, *rgn2;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, r2.size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, r1.size + r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the beginning of r1 (that is r1.base < r2.base + r2.size):
 *
 *  |    +----+----+------------+          |
 *  |    |    | r2 |     r1     |          |
 *  +----+----+----+------------+----------+
 *       ^    ^
 *       |    |
 *       |    r1.base
 *       |
 *       r2.base
 *
 * Expect to merge the two entries into one region that starts at r2.base
 * and has size of two regions minus their intersection. The total size of
 * the available memory is updated, and the region counter stays the same.
 */
static int memblock_add_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_512M,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 overlaps
 * with the end of r1 (that is r2.base < r1.base + r1.size):
 *
 *  |  +--+------+----------+              |
 *  |  |  |  r1  |    r2    |              |
 *  +--+--+------+----------+--------------+
 *     ^  ^
 *     |  |
 *     |  r2.base
 *     |
 *     r1.base
 *
 * Expect to merge the two entries into one region that starts at r1.base
 * and has size of two regions minus their intersection. The total size of
 * the available memory is updated, and the region counter stays the same.
 */
static int memblock_add_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_128M,
		.size = SZ_512M
	};
	struct region r2 = {
		.base = SZ_256M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	total_size = (r2.base - r1.base) + r2.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks r1 and r2, where r2 is
 * within the range of r1 (that is r1.base < r2.base &&
 * r2.base + r2.size < r1.base + r1.size):
 *
 *  |   +-------+--+-----------------------+
 *  |   |       |r2|          r1           |
 *  +---+-------+--+-----------------------+
 *      ^
 *      |
 *      r1.base
 *
 * Expect to merge two entries into one region that stays the same.
 * The counter and total size of available memory are not updated.
 */
static int memblock_add_within_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to add the same memory block twice. Expect
 * the counter and total size of available memory to not be updated.
 */
static int memblock_add_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);
	memblock_add(r.base, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to add two memory blocks that don't overlap with one
 * another and then add a third memory block in the space between the first two:
 *
 *  |        +--------+--------+--------+  |
 *  |        |   r1   |   r3   |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one region that starts at r1.base
 * and has size of r1.size + r2.size + r3.size. The region counter and total
 * size of the available memory are updated.
 */
static int memblock_add_between_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct region r3 = {
		.base = SZ_1G + SZ_8K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	total_size = r1.size + r2.size + r3.size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_add(r3.base, r3.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to add a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |    r   |
 *                               +--------+
 *  |                            +----+
 *  |                            | rgn|
 *  +----------------------------+----+
 *
 * Expect to add a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of available memory and the counter to be updated.
 */
static int memblock_add_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = PHYS_ADDR_MAX - r.base;

	reset_memblock_regions();
	memblock_add(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}
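/*
 * Illustrative sketch (hypothetical helper, not used by the tests): the
 * *_many tests below, for both memblock_add() and memblock_reserve(),
 * compute the size of the doubled region array the same way;
 * memblock_double_array() grows the array to twice INIT_MEMBLOCK_REGIONS
 * entries, page-aligned.
 */
static inline phys_addr_t doubled_regions_size(void)
{
	return PAGE_ALIGN(INIT_MEMBLOCK_REGIONS * 2 *
			  sizeof(struct memblock_region));
}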
/*
 * A test that tries to add the 129th memory block.
 * Expect to trigger memblock_double_array() to double
 * memblock.memory.max and to find a new valid memory range for
 * memory.regions.
 */
static int memblock_add_many_check(void)
{
	int i;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t new_memory_regions_size;
	phys_addr_t base, size = SZ_64;
	phys_addr_t gap_size = SZ_64;

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_allow_resize();

	dummy_physical_memory_init();
	/*
	 * dummy_physical_memory_init() allocated enough memory for us, and
	 * we split it into small blocks. First we carve out a large enough
	 * memory block to serve as the region that will be chosen by
	 * memblock_double_array().
	 */
	base = PAGE_ALIGN(dummy_physical_memory_base());
	new_memory_regions_size = PAGE_ALIGN(INIT_MEMBLOCK_REGIONS * 2 *
					     sizeof(struct memblock_region));
	memblock_add(base, new_memory_regions_size);

	/* This is the base of the small memory blocks. */
	base += new_memory_regions_size + gap_size;

	orig_region = memblock.memory.regions;

	for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
		/*
		 * Add these small blocks to fill up the memblock. Keep a
		 * gap between neighbouring blocks so they are not merged.
		 */
		memblock_add(base, size);
		base += size + gap_size;

		ASSERT_EQ(memblock.memory.cnt, i + 2);
		ASSERT_EQ(memblock.memory.total_size,
			  new_memory_regions_size + (i + 1) * size);
	}

	/*
	 * At this point, memblock_double_array() has succeeded; check that
	 * it updated memory.max.
	 */
	ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);

	/* memblock_double_array() will reserve the memory it used. Check it. */
	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, new_memory_regions_size);

	/*
	 * memblock_double_array() has worked fine so far. Check that
	 * memblock_add() still works as expected after the array was doubled.
	 */
	memblock_add(r.base, r.size);
	ASSERT_EQ(memblock.memory.regions[0].base, r.base);
	ASSERT_EQ(memblock.memory.regions[0].size, r.size);

	ASSERT_EQ(memblock.memory.cnt, INIT_MEMBLOCK_REGIONS + 2);
	ASSERT_EQ(memblock.memory.total_size, INIT_MEMBLOCK_REGIONS * size +
					      new_memory_regions_size +
					      r.size);
	ASSERT_EQ(memblock.memory.max, INIT_MEMBLOCK_REGIONS * 2);

	dummy_physical_memory_cleanup();

	/*
	 * The current memory.regions occupies a range of memory that was
	 * allocated by dummy_physical_memory_init(). After that memory is
	 * freed, we must not use it. Restore the original region array so
	 * the remaining tests run as normal, unaffected by the doubled array.
	 */
	memblock.memory.regions = orig_region;
	memblock.memory.cnt = INIT_MEMBLOCK_REGIONS;

	test_pass_pop();

	return 0;
}

static int memblock_add_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_ADD);
	test_print("Running %s tests...\n", FUNC_ADD);

	memblock_add_simple_check();
	memblock_add_node_simple_check();
	memblock_add_disjoint_check();
	memblock_add_overlap_top_check();
	memblock_add_overlap_bottom_check();
	memblock_add_within_check();
	memblock_add_twice_check();
	memblock_add_between_check();
	memblock_add_near_max_check();
	memblock_add_many_check();

	prefix_pop();

	return 0;
}

/*
 * A simple test that marks a memory block of a specified base address
 * and size as reserved and adds it to the collection of reserved memory
 * regions (memblock.reserved). Expect to create a new entry. The region
 * counter and total memory size are updated.
 */
static int memblock_reserve_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r = {
		.base = SZ_2G,
		.size = SZ_128M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks that don't overlap as reserved:
 *
 *  |        +--+      +----------------+  |
 *  |        |r1|      |       r2       |  |
 *  +--------+--+------+----------------+--+
 *
 * Expect to add two entries to the collection of reserved memory regions
 * (memblock.reserved). The total size and region counter for
 * memblock.reserved are updated.
 */
static int memblock_reserve_disjoint_check(void)
{
	struct memblock_region *rgn1, *rgn2;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_256M,
		.size = SZ_16M
	};
	struct region r2 = {
		.base = SZ_512M,
		.size = SZ_512M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, r2.size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, r1.size + r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the beginning of r1 (that is
 * r1.base < r2.base + r2.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |       r2     |  |     r1       |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r1.base
 *     |
 *     r2.base
 *
 * Expect to merge two entries into one region that starts at r2.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
 */
static int memblock_reserve_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_1G
	};
	struct region r2 = {
		.base = SZ_128M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	total_size = (r1.base - r2.base) + r1.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 overlaps with the end of r1 (that is
 * r2.base < r1.base + r1.size):
 *
 *  |  +--------------+--+--------------+  |
 *  |  |       r1     |  |     r2       |  |
 *  +--+--------------+--+--------------+--+
 *     ^              ^
 *     |              |
 *     |              r2.base
 *     |
 *     r1.base
 *
 * Expect to merge two entries into one region that starts at r1.base and
 * has size of two regions minus their intersection. The total size of the
 * reserved memory is updated, and the region counter is not updated.
 */
static int memblock_reserve_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_128K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_128K
	};

	PREFIX_PUSH();

	total_size = (r2.base - r1.base) + r2.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks r1 and r2 as reserved,
 * where r2 is within the range of r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *  | +-----+--+---------------------------|
 *  | |     |r2|          r1               |
 *  +-+-----+--+---------------------------+
 *    ^     ^
 *    |     |
 *    |     r2.base
 *    |
 *    r1.base
 *
 * Expect to merge two entries into one region that stays the same. The
 * counter and total size of reserved memory are not updated.
 */
static int memblock_reserve_within_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_8M
	};
	struct region r2 = {
		.base = SZ_2M,
		.size = SZ_64K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to reserve the same memory block twice.
 * Expect the region counter and total size of reserved memory to not
 * be updated.
 */
static int memblock_reserve_twice_check(void)
{
	struct region r = {
		.base = SZ_16K,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r.base, r.size);
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to mark two memory blocks that don't overlap as reserved
 * and then reserve a third memory block in the space between the first two:
 *
 *  |        +--------+--------+--------+  |
 *  |        |   r1   |   r3   |   r2   |  |
 *  +--------+--------+--------+--------+--+
 *
 * Expect to merge the three entries into one reserved region that starts at
 * r1.base and has size of r1.size + r2.size + r3.size. The region counter and
 * total for memblock.reserved are updated.
 */
static int memblock_reserve_between_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_1G,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_1G + SZ_16K,
		.size = SZ_8K
	};
	struct region r3 = {
		.base = SZ_1G + SZ_8K,
		.size = SZ_8K
	};

	PREFIX_PUSH();

	total_size = r1.size + r2.size + r3.size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_reserve(r3.base, r3.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to reserve a memory block r when r extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |    r   |
 *                               +--------+
 *  |                            +----+
 *  |                            | rgn|
 *  +----------------------------+----+
 *
 * Expect to reserve a memory block of size PHYS_ADDR_MAX - r.base. Expect the
 * total size of reserved memory and the counter to be updated.
 */
static int memblock_reserve_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = PHYS_ADDR_MAX - r.base;

	reset_memblock_regions();
	memblock_reserve(r.base, r.size);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to reserve the 129th memory block.
 * Expect to trigger memblock_double_array() to double
 * memblock.reserved.max and to find a new valid memory range for
 * reserved.regions.
 */
static int memblock_reserve_many_check(void)
{
	int i;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t memory_base = SZ_128K;
	phys_addr_t new_reserved_regions_size;

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_allow_resize();

	/* Add a valid memory region used by double_array(). */
	dummy_physical_memory_init();
	memblock_add(dummy_physical_memory_base(), MEM_SIZE);

	for (i = 0; i < INIT_MEMBLOCK_REGIONS; i++) {
		/* Reserve some fake memory regions to fill up the memblock. */
		memblock_reserve(memory_base, MEM_SIZE);

		ASSERT_EQ(memblock.reserved.cnt, i + 1);
		ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);

		/* Keep a gap so these memory regions will not be merged. */
		memory_base += MEM_SIZE * 2;
	}

	orig_region = memblock.reserved.regions;

	/* Reserving the 129th memory region triggers the doubling of the array. */
	memblock_reserve(memory_base, MEM_SIZE);

	/*
	 * This is the memory region size used by the doubled reserved.regions,
	 * and it has been reserved because it is now in use. The size is used
	 * to calculate the total_size that memblock.reserved now has.
	 */
	new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
					       sizeof(struct memblock_region));
	/*
	 * memblock_double_array() finds a free memory region to hold the new
	 * reserved.regions, and that region is itself reserved, so one more
	 * region exists in the reserved memblock. The extra reserved region's
	 * size is new_reserved_regions_size.
	 */
	ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
	ASSERT_EQ(memblock.reserved.total_size,
		  (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
		  new_reserved_regions_size);
	ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

	/*
	 * memblock_double_array() has worked fine so far. Check that
	 * memblock_reserve() still works as expected after the array
	 * was doubled.
	 */
	memblock_reserve(r.base, r.size);
	ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
	ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

	ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
	ASSERT_EQ(memblock.reserved.total_size,
		  (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
		  new_reserved_regions_size + r.size);
	ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

	dummy_physical_memory_cleanup();

	/*
	 * The current reserved.regions occupies a range of memory that was
	 * allocated by dummy_physical_memory_init(). After that memory is
	 * freed, we must not use it. Restore the original region array so
	 * the remaining tests run as normal, unaffected by the doubled array.
	 */
	memblock.reserved.regions = orig_region;
	memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to reserve the 129th memory block at all locations.
 * Expect to trigger memblock_double_array() to double
 * memblock.reserved.max and to find a new valid memory range for
 * reserved.regions.
 *
 *      0        1        2                 128
 *  +-------+ +-------+ +-------+       +-------+
 *  |  32K  | |  32K  | |  32K  |  ...  |  32K  |
 *  +-------+-------+-------+-------+-------+   +-------+
 *          |<-32K->|       |<-32K->|
 *
 */
/* Keep a gap so these memory regions will not be merged. */
#define MEMORY_BASE(idx) (SZ_128K + (MEM_SIZE * 2) * (idx))
static int memblock_reserve_all_locations_check(void)
{
	int i, skip;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t new_reserved_regions_size;

	PREFIX_PUSH();

	/* Reserve the 129th memory block at all possible positions. */
	for (skip = 0; skip < INIT_MEMBLOCK_REGIONS + 1; skip++) {
		reset_memblock_regions();
		memblock_allow_resize();

		/* Add a valid memory region used by double_array(). */
		dummy_physical_memory_init();
		memblock_add(dummy_physical_memory_base(), MEM_SIZE);

		for (i = 0; i < INIT_MEMBLOCK_REGIONS + 1; i++) {
			if (i == skip)
				continue;

			/* Reserve some fake memory regions to fill up the memblock. */
			memblock_reserve(MEMORY_BASE(i), MEM_SIZE);

			if (i < skip) {
				ASSERT_EQ(memblock.reserved.cnt, i + 1);
				ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
			} else {
				ASSERT_EQ(memblock.reserved.cnt, i);
				ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
			}
		}

		orig_region = memblock.reserved.regions;

		/* Reserving the 129th memory region triggers the doubling of the array. */
		memblock_reserve(MEMORY_BASE(skip), MEM_SIZE);

		/*
		 * This is the memory region size used by the doubled
		 * reserved.regions, and it has been reserved because it is
		 * now in use. The size is used to calculate the total_size
		 * that memblock.reserved now has.
		 */
		new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
						       sizeof(struct memblock_region));

		/*
		 * memblock_double_array() finds a free memory region to hold
		 * the new reserved.regions, and that region is itself
		 * reserved, so one more region exists in the reserved
		 * memblock. The extra reserved region's size is
		 * new_reserved_regions_size.
		 */
		ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
		ASSERT_EQ(memblock.reserved.total_size,
			  (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
			  new_reserved_regions_size);
		ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

		/*
		 * memblock_double_array() has worked fine so far. Check that
		 * memblock_reserve() still works as expected after the array
		 * was doubled.
		 */
		memblock_reserve(r.base, r.size);
		ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
		ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

		ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
		ASSERT_EQ(memblock.reserved.total_size,
			  (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
			  new_reserved_regions_size + r.size);
		ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

		dummy_physical_memory_cleanup();

		/*
		 * The current reserved.regions occupies a range of memory
		 * that was allocated by dummy_physical_memory_init(). After
		 * that memory is freed, we must not use it. Restore the
		 * original region array so the remaining tests run as normal,
		 * unaffected by the doubled array.
		 */
		memblock.reserved.regions = orig_region;
		memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
	}

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to reserve the 129th memory block at all locations.
 * Expect to trigger memblock_double_array() to double
 * memblock.reserved.max and to find a new valid memory range for
 * reserved.regions, making sure it doesn't conflict with the range we
 * want to reserve.
 *
 * For example, we have 128 regions in reserved and now want to reserve
 * the skipped one. Since reserved is full, memblock_double_array() would find
 * an available range in memory for the new array. We intentionally put two
 * ranges in memory, one of which is the exact range of the skipped one. Before
 * commit 48c3b583bbdd ("mm/memblock: fix overlapping allocation when doubling
 * reserved array"), the new array would sit in the skipped range, which is a
 * conflict. The expected new array should be allocated from memory.regions[0].
 *
 *               0       1
 * memory    +-------+ +-------+
 *           |  32K  | |  32K  |
 *           +-------+ ------+-------+-------+-------+
 *                    |<-32K->|<-32K->|<-32K->|
 *
 *               0       skipped           127
 * reserved  +-------+ ......... +-------+
 *           |  32K  | .  32K  . ... |  32K  |
 *           +-------+-------+-------+   +-------+
 *                   |<-32K->|
 *                       ^
 *                       |
 *                       skipped one
 */
/* Keep a gap so these memory regions will not be merged. */
#define MEMORY_BASE_OFFSET(idx, offset) ((offset) + (MEM_SIZE * 2) * (idx))
static int memblock_reserve_many_may_conflict_check(void)
{
	int i, skip;
	void *orig_region;
	struct region r = {
		.base = SZ_16K,
		.size = SZ_16K,
	};
	phys_addr_t new_reserved_regions_size;

	/*
	 *  0        1                 129
	 *  +---+    +---+        +---+
	 *  |32K|    |32K|   ..   |32K|
	 *  +---+    +---+        +---+
	 *
	 * Pre-allocate the ranges for the 129 memory blocks, plus one range
	 * at idx 0 for the doubled memblock.reserved.regions.
	 */
	dummy_physical_memory_init();
	phys_addr_t memory_base = dummy_physical_memory_base();
	phys_addr_t offset = PAGE_ALIGN(memory_base);

	PREFIX_PUSH();

	/* Reserve the 129th memory block at all possible positions. */
	for (skip = 1; skip <= INIT_MEMBLOCK_REGIONS + 1; skip++) {
		reset_memblock_regions();
		memblock_allow_resize();
		reset_memblock_attributes();

		/* Add a valid memory region used by double_array(). */
		memblock_add(MEMORY_BASE_OFFSET(0, offset), MEM_SIZE);
		/*
		 * Add a memory region which will be reserved as the 129th
		 * memory region. This is not expected to be used by
		 * double_array().
		 */
		memblock_add(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);

		for (i = 1; i <= INIT_MEMBLOCK_REGIONS + 1; i++) {
			if (i == skip)
				continue;

			/* Reserve some fake memory regions to fill up the memblock. */
			memblock_reserve(MEMORY_BASE_OFFSET(i, offset), MEM_SIZE);

			if (i < skip) {
				ASSERT_EQ(memblock.reserved.cnt, i);
				ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
			} else {
				ASSERT_EQ(memblock.reserved.cnt, i - 1);
				ASSERT_EQ(memblock.reserved.total_size, (i - 1) * MEM_SIZE);
			}
		}

		orig_region = memblock.reserved.regions;

		/* Reserving the 129th memory region triggers the doubling of the array. */
		memblock_reserve(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);

		/*
		 * This is the memory region size used by the doubled
		 * reserved.regions, and it has been reserved because it is
		 * now in use. The size is used to calculate the total_size
		 * that memblock.reserved now has.
		 */
		new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
						       sizeof(struct memblock_region));

		/*
		 * memblock_double_array() finds a free memory region to hold
		 * the new reserved.regions, and that region is itself
		 * reserved, so one more region exists in the reserved
		 * memblock. The extra reserved region's size is
		 * new_reserved_regions_size.
		 */
		ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
		ASSERT_EQ(memblock.reserved.total_size,
			  (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
			  new_reserved_regions_size);
		ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);

		/*
		 * The first reserved region is allocated for the doubled
		 * array, with the size of new_reserved_regions_size and the
		 * base to be MEMORY_BASE_OFFSET(0, offset) + SZ_32K -
		 * new_reserved_regions_size.
		 */
		ASSERT_EQ(memblock.reserved.regions[0].base +
			  memblock.reserved.regions[0].size,
			  MEMORY_BASE_OFFSET(0, offset) + SZ_32K);
		ASSERT_EQ(memblock.reserved.regions[0].size,
			  new_reserved_regions_size);

		/*
		 * memblock_double_array() has worked fine so far. Check that
		 * memblock_reserve() still works as expected after the array
		 * was doubled.
		 */
		memblock_reserve(r.base, r.size);
		ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
		ASSERT_EQ(memblock.reserved.regions[0].size, r.size);

		ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
		ASSERT_EQ(memblock.reserved.total_size,
			  (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
			  new_reserved_regions_size + r.size);
		ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
		/*
		 * The current reserved.regions occupies a range of memory
		 * that was allocated by dummy_physical_memory_init(). After
		 * that memory is freed, we must not use it. Restore the
		 * original region array so the remaining tests run as normal,
		 * unaffected by the doubled array.
		 */
		memblock.reserved.regions = orig_region;
		memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
	}

	dummy_physical_memory_cleanup();

	test_pass_pop();

	return 0;
}

static int memblock_reserve_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_RESERVE);
	test_print("Running %s tests...\n", FUNC_RESERVE);

	memblock_reserve_simple_check();
	memblock_reserve_disjoint_check();
	memblock_reserve_overlap_top_check();
	memblock_reserve_overlap_bottom_check();
	memblock_reserve_within_check();
	memblock_reserve_twice_check();
	memblock_reserve_between_check();
	memblock_reserve_near_max_check();
	memblock_reserve_many_check();
	memblock_reserve_all_locations_check();
	memblock_reserve_many_may_conflict_check();

	prefix_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions. By "removing" a region we mean overwriting it
 * with the next region r2 in memblock.memory:
 *
 *  |  ......          +----------------+  |
 *  |  : r1 :          |       r2       |  |
 *  +--+----+----------+----------------+--+
 *                     ^
 *                     |
 *                     rgn.base
 *
 * Expect to add two memory blocks r1 and r2 and then remove r1 so that
 * r2 is the first available region. The region counter and total size
 * are updated.
 */
static int memblock_remove_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};
	struct region r2 = {
		.base = SZ_128K,
		.size = SZ_4M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_remove(r1.base, r1.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, r2.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that was not registered as
 * available memory (i.e. has no corresponding entry in memblock.memory):
 *
 *                     +----------------+
 *                     |       r2       |
 *                     +----------------+
 *  |  +----+                              |
 *  |  | r1 |                              |
 *  +--+----+------------------------------+
 *     ^
 *     |
 *     rgn.base
 *
 * Expect the array, regions counter and total size to not be modified.
 */
static int memblock_remove_absent_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_512K,
		.size = SZ_4M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_1G
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, r1.size);

	test_pass_pop();

	return 0;
}
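/*
 * Illustrative sketch (hypothetical helper, not used by the tests): when a
 * range overlapping the beginning of a region is removed or freed, the
 * tests below expect the remainder to run from the end of the removed
 * range to the end of the original region.
 */
static inline phys_addr_t size_after_remove_overlap_top(struct region r1,
							struct region r2)
{
	/* remainder: (r1.base + r1.size) - (r2.base + r2.size) */
	return (r1.base + r1.size) - (r2.base + r2.size);
}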
/*
 * A test that tries to remove a region r2 that overlaps with the
 * beginning of the already existing entry r1
 * (that is r1.base < r2.base + r2.size):
 *
 *            +-----------------+
 *            |       r2        |
 *            +-----------------+
 *  |                 .........+--------+  |
 *  |                 :   r1   |  rgn   |  |
 *  +-----------------+--------+--------+--+
 *                    ^        ^
 *                    |        |
 *                    |        rgn.base
 *                    r1.base
 *
 * Expect that only the intersection of both regions is removed from the
 * available memory pool. The regions counter and total size are updated.
 */
static int memblock_remove_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t r1_end, r2_end, total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_32M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_32M
	};

	PREFIX_PUSH();

	r1_end = r1.base + r1.size;
	r2_end = r2.base + r2.size;
	total_size = r1_end - r2_end;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2_end);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that overlaps with the end of
 * the already existing region r1 (that is r2.base < r1.base + r1.size):
 *
 *          +--------------------------------+
 *          |               r2               |
 *          +--------------------------------+
 *  | +---+.....                             |
 *  | |rgn| r1 :                             |
 *  +-+---+----+-----------------------------+
 *    ^
 *    |
 *    r1.base
 *
 * Expect that only the intersection of both regions is removed from the
 * available memory pool. The regions counter and total size are updated.
 */
static int memblock_remove_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2M,
		.size = SZ_64M
	};
	struct region r2 = {
		.base = SZ_32M,
		.size = SZ_256M
	};

	PREFIX_PUSH();

	total_size = r2.base - r1.base;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r2 that is within the range of
 * the already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                    +----+
 *                    | r2 |
 *                    +----+
 *  | +-------------+....+---------------+ |
 *  | |     rgn1    | r1 |     rgn2      | |
 *  +-+-------------+----+---------------+-+
 *    ^
 *    |
 *    r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size are updated.
 */
static int memblock_remove_within_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t r1_size, r2_size, total_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	r1_size = r2.base - r1.base;
	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
	total_size = r1_size + r2_size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1_size);

	ASSERT_EQ(rgn2->base, r2.base + r2.size);
	ASSERT_EQ(rgn2->size, r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r1 from the array of
 * available memory regions when r1 is the only available region.
 * Expect to add a memory block r1 and then remove r1 so that a dummy
 * region is added. The region counter stays the same, and the total size
 * is updated.
 */
static int memblock_remove_only_region_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r1.base, r1.size);

	ASSERT_EQ(rgn->base, 0);
	ASSERT_EQ(rgn->size, 0);

	ASSERT_EQ(memblock.memory.cnt, 0);
	ASSERT_EQ(memblock.memory.total_size, 0);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to remove a region r2 from the array of available
 * memory regions when r2 extends past PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |   r2   |
 *                               +--------+
 *  |                        +---+....+
 *  |                        |rgn|    |
 *  +------------------------+---+----+
 *
 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is removed.
 * Expect the total size of available memory to be updated and the counter to
 * not be updated.
 */
static int memblock_remove_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = PHYS_ADDR_MAX - SZ_2M,
		.size = SZ_2M
	};
	struct region r2 = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_remove(r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.memory.cnt, 1);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to remove a region r3 that overlaps with two existing
 * regions r1 and r2:
 *
 *             +----------------+
 *             |       r3       |
 *             +----------------+
 *  |    +----+.....   ........+--------+  |
 *  |    |    |r1  :   :       |r2      |  |
 *  +----+----+----+---+-------+--------+--+
 *
 * Expect that only the intersections of r1 with r3 and r2 with r3 are removed
 * from the available memory pool. Expect the total size of available memory to
 * be updated and the counter to not be updated.
 */
static int memblock_remove_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_remove(r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);
	ASSERT_EQ(memblock.memory.total_size, total_size);

	test_pass_pop();

	return 0;
}

static int memblock_remove_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_REMOVE);
	test_print("Running %s tests...\n", FUNC_REMOVE);

	memblock_remove_simple_check();
	memblock_remove_absent_check();
	memblock_remove_overlap_top_check();
	memblock_remove_overlap_bottom_check();
	memblock_remove_within_check();
	memblock_remove_only_region_check();
	memblock_remove_near_max_check();
	memblock_remove_overlap_two_check();

	prefix_pop();

	return 0;
}
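/*
 * Note on the casts below: unlike memblock_reserve(), which takes a
 * phys_addr_t, memblock_free() takes a pointer, so the tests cast the
 * physical base of each region to (void *) before freeing it.
 */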
/*
 * A simple test that tries to free a memory block r1 that was marked
 * earlier as reserved. By "freeing" a region we mean overwriting it with
 * the next entry r2 in memblock.reserved:
 *
 *  |            ......           +----+ |
 *  |            : r1 :           | r2 | |
 *  +------------+----+-----------+----+-+
 *                                ^
 *                                |
 *                                rgn.base
 *
 * Expect to reserve two memory regions and then erase the r1 region by
 * overwriting it with the value of r2. The region counter and total size
 * are updated.
 */
static int memblock_free_simple_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_4M,
		.size = SZ_1M
	};
	struct region r2 = {
		.base = SZ_8M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r1.base, r1.size);

	ASSERT_EQ(rgn->base, r2.base);
	ASSERT_EQ(rgn->size, r2.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r2.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that was not marked as reserved
 * (i.e. has no corresponding entry in memblock.reserved):
 *
 *                     +----------------+
 *                     |       r2       |
 *                     +----------------+
 *  |  +----+                              |
 *  |  | r1 |                              |
 *  +--+----+------------------------------+
 *     ^
 *     |
 *     rgn.base
 *
 * The array, regions counter and total size are not modified.
 */
static int memblock_free_absent_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_2M,
		.size = SZ_8K
	};
	struct region r2 = {
		.base = SZ_16M,
		.size = SZ_128M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that overlaps with the beginning
 * of the already existing entry r1 (that is r1.base < r2.base + r2.size):
 *
 *     +----+
 *     | r2 |
 *     +----+
 *  |    ...+--------------+               |
 *  |    :  |    r1        |               |
 *  +----+--+--------------+---------------+
 *       ^  ^
 *       |  |
 *       |  rgn.base
 *       |
 *       r1.base
 *
 * Expect that only the intersection of both regions is freed. The
 * regions counter and total size are updated.
 */
static int memblock_free_overlap_top_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_1M,
		.size = SZ_8M
	};

	PREFIX_PUSH();

	total_size = (r1.size + r1.base) - (r2.base + r2.size);

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r2.base + r2.size);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that overlaps with the end of
 * the already existing entry r1 (that is r2.base < r1.base + r1.size):
 *
 *                   +----------------+
 *                   |       r2       |
 *                   +----------------+
 *  | +-----------+.....                   |
 *  | |    r1     |    :                   |
 *  +-+-----------+----+-------------------+
 *
 * Expect that only the intersection of both regions is freed. The
 * regions counter and total size are updated.
 */
static int memblock_free_overlap_bottom_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_8M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_32M,
		.size = SZ_32M
	};

	PREFIX_PUSH();

	total_size = r2.base - r1.base;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a region r2 that is within the range of the
 * already existing entry r1 (that is
 * (r1.base < r2.base) && (r2.base + r2.size < r1.base + r1.size)):
 *
 *                    +----+
 *                    | r2 |
 *                    +----+
 *  |  +------------+....+---------------+ |
 *  |  |    rgn1    | r1 |     rgn2      | |
 *  +--+------------+----+---------------+-+
 *     ^
 *     |
 *     r1.base
 *
 * Expect that the region is split into two - one that ends at r2.base and
 * another that starts at r2.base + r2.size, with appropriate sizes. The
 * region counter and total size fields are updated.
 */
static int memblock_free_within_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t r1_size, r2_size, total_size;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_1M,
		.size = SZ_8M
	};
	struct region r2 = {
		.base = SZ_4M,
		.size = SZ_1M
	};

	PREFIX_PUSH();

	r1_size = r2.base - r1.base;
	r2_size = (r1.base + r1.size) - (r2.base + r2.size);
	total_size = r1_size + r2_size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1_size);

	ASSERT_EQ(rgn2->base, r2.base + r2.size);
	ASSERT_EQ(rgn2->size, r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to free a memory block r1 that was marked
 * earlier as reserved when r1 is the only available region.
 * Expect to reserve a memory block r1 and then free r1 so that r1 is
 * overwritten with a dummy region. The region counter stays the same,
 * and the total size is updated.
 */
static int memblock_free_only_region_check(void)
{
	struct memblock_region *rgn;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = SZ_2K,
		.size = SZ_4K
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r1.base, r1.size);

	ASSERT_EQ(rgn->base, 0);
	ASSERT_EQ(rgn->size, 0);

	ASSERT_EQ(memblock.reserved.cnt, 0);
	ASSERT_EQ(memblock.reserved.total_size, 0);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to free a region r2 when r2 extends past
 * PHYS_ADDR_MAX:
 *
 *                               +--------+
 *                               |   r2   |
 *                               +--------+
 *  |                        +---+....+
 *  |                        |rgn|    |
 *  +------------------------+---+----+
 *
 * Expect that only the portion between PHYS_ADDR_MAX and r2.base is freed.
 * Expect the total size of reserved memory to be updated and the counter to
 * not be updated.
 */
static int memblock_free_near_max_check(void)
{
	struct memblock_region *rgn;
	phys_addr_t total_size;

	rgn = &memblock.reserved.regions[0];

	struct region r1 = {
		.base = PHYS_ADDR_MAX - SZ_2M,
		.size = SZ_2M
	};
	struct region r2 = {
		.base = PHYS_ADDR_MAX - SZ_1M,
		.size = SZ_2M
	};

	PREFIX_PUSH();

	total_size = r1.size - (PHYS_ADDR_MAX - r2.base);

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_free((void *)r2.base, r2.size);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, total_size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to free a reserved region r3 that overlaps with two
 * existing reserved regions r1 and r2:
 *
 *             +----------------+
 *             |       r3       |
 *             +----------------+
 *  |    +----+.....   ........+--------+  |
 *  |    |    |r1  :   :       |r2      |  |
 *  +----+----+----+---+-------+--------+--+
 *
 * Expect that only the intersections of r1 with r3 and r2 with r3 are freed
 * from the collection of reserved memory. Expect the total size of reserved
 * memory to be updated and the counter to not be updated.
 */
static int memblock_free_overlap_two_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	phys_addr_t new_r1_size, new_r2_size, r2_end, r3_end, total_size;

	rgn1 = &memblock.reserved.regions[0];
	rgn2 = &memblock.reserved.regions[1];

	struct region r1 = {
		.base = SZ_16M,
		.size = SZ_32M
	};
	struct region r2 = {
		.base = SZ_64M,
		.size = SZ_64M
	};
	struct region r3 = {
		.base = SZ_32M,
		.size = SZ_64M
	};

	PREFIX_PUSH();

	r2_end = r2.base + r2.size;
	r3_end = r3.base + r3.size;
	new_r1_size = r3.base - r1.base;
	new_r2_size = r2_end - r3_end;
	total_size = new_r1_size + new_r2_size;

	reset_memblock_regions();
	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);
	memblock_free((void *)r3.base, r3.size);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, new_r1_size);

	ASSERT_EQ(rgn2->base, r3_end);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

static int memblock_free_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_FREE);
	test_print("Running %s tests...\n", FUNC_FREE);

	memblock_free_simple_check();
	memblock_free_absent_check();
	memblock_free_overlap_top_check();
	memblock_free_overlap_bottom_check();
	memblock_free_within_check();
	memblock_free_only_region_check();
	memblock_free_near_max_check();
	memblock_free_overlap_two_check();

	prefix_pop();

	return 0;
}

static int memblock_set_bottom_up_check(void)
{
	prefix_push("memblock_set_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock.bottom_up, false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock.bottom_up, true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

static int memblock_bottom_up_check(void)
{
	prefix_push("memblock_bottom_up");

	memblock_set_bottom_up(false);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), false);
	memblock_set_bottom_up(true);
	ASSERT_EQ(memblock_bottom_up(), memblock.bottom_up);
	ASSERT_EQ(memblock_bottom_up(), true);

	reset_memblock_attributes();
	test_pass_pop();

	return 0;
}

static int memblock_bottom_up_checks(void)
{
	test_print("Running memblock_*bottom_up tests...\n");

	prefix_reset();
	memblock_set_bottom_up_check();
	prefix_reset();
	memblock_bottom_up_check();

	return 0;
}
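/*
 * Illustrative sketch (hypothetical helpers, assuming a power-of-two
 * alignment such as SMP_CACHE_BYTES): memblock_trim_memory() rounds a
 * region's base up and its end down to the alignment, dropping regions
 * smaller than one aligned block, as the trim tests below verify.
 */
static inline phys_addr_t trimmed_base(phys_addr_t base, phys_addr_t align)
{
	return (base + align - 1) & ~(align - 1);	/* round up */
}

static inline phys_addr_t trimmed_end(phys_addr_t end, phys_addr_t align)
{
	return end & ~(align - 1);			/* round down */
}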
/*
 * A test that tries to trim memory when both ends of the memory region are
 * aligned. Expect that the memory will not be trimmed. Expect the counter to
 * not be updated.
 */
static int memblock_trim_memory_aligned_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r = {
		.base = alignment,
		.size = alignment * 4
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn->base, r.base);
	ASSERT_EQ(rgn->size, r.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned on one end
 * and smaller than the alignment:
 *
 *                                     alignment
 *                                     |--------|
 * |        +-----------------+        +------+   |
 * |        |        r1       |        |  r2  |   |
 * +--------+-----------------+--------+------+---+
 *          ^        ^        ^        ^     ^
 *          |________|________|________|     |
 *                   |                       Unaligned address
 *           Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be removed. Expect the
 * counter to be updated.
 */
static int memblock_trim_memory_too_small_check(void)
{
	struct memblock_region *rgn;
	const phys_addr_t alignment = SMP_CACHE_BYTES;

	rgn = &memblock.memory.regions[0];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4,
		.size = alignment - SZ_2
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn->base, r1.base);
	ASSERT_EQ(rgn->size, r1.size);

	ASSERT_EQ(memblock.memory.cnt, 1);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is unaligned at the base
 * and aligned at the end:
 *
 *                               Unaligned address
 *                                        |
 *                                        v
 * |        +-----------------+          +---------------+  |
 * |        |        r1       |          |      r2       |  |
 * +--------+-----------------+----------+---------------+--+
 *          ^        ^        ^        ^        ^        ^
 *          |________|________|________|________|________|
 *                            |
 *                    Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be trimmed at the base.
 * Expect the counter to not be updated.
 */
static int memblock_trim_memory_unaligned_base_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	const phys_addr_t alignment = SMP_CACHE_BYTES;
	phys_addr_t offset = SZ_2;
	phys_addr_t new_r2_base, new_r2_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4 + offset,
		.size = alignment * 2 - offset
	};

	PREFIX_PUSH();
	new_r2_base = r2.base + (alignment - offset);
	new_r2_size = r2.size - (alignment - offset);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, new_r2_base);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);

	test_pass_pop();

	return 0;
}
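
/*
 * A worked instance of the trimming arithmetic above (an illustrative
 * sketch; it assumes SMP_CACHE_BYTES is 64, which is only one possible
 * value): with offset = SZ_2,
 *
 *	r2.base     = 4 * 64 + 2        = 258
 *	new_r2_base = 258 + (64 - 2)    = 320   (base rounded up to 5 * 64)
 *	new_r2_size = (2 * 64 - 2) - 62 = 64    (one aligned block survives)
 *
 * memblock_trim_memory() keeps only the largest fully aligned sub-range of
 * each region, which is why the too-small r2 in the earlier test vanished
 * entirely while this r2 merely shrinks.
 */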
/*
 * A test that tries to trim memory when there are two available regions, r1 and
 * r2. Region r1 is aligned on both ends and region r2 is aligned at the base
 * and unaligned at the end:
 *
 *                                           Unaligned address
 *                                                   |
 *                                                   v
 * |        +-----------------+        +---------------+   |
 * |        |        r1       |        |      r2       |   |
 * +--------+-----------------+--------+---------------+---+
 *          ^        ^        ^        ^        ^        ^
 *          |________|________|________|________|________|
 *                            |
 *                    Aligned addresses
 *
 * Expect that r1 will not be trimmed and r2 will be trimmed at the end.
 * Expect the counter to not be updated.
 */
static int memblock_trim_memory_unaligned_end_check(void)
{
	struct memblock_region *rgn1, *rgn2;
	const phys_addr_t alignment = SMP_CACHE_BYTES;
	phys_addr_t offset = SZ_2;
	phys_addr_t new_r2_size;

	rgn1 = &memblock.memory.regions[0];
	rgn2 = &memblock.memory.regions[1];

	struct region r1 = {
		.base = alignment,
		.size = alignment * 2
	};
	struct region r2 = {
		.base = alignment * 4,
		.size = alignment * 2 - offset
	};

	PREFIX_PUSH();
	new_r2_size = r2.size - (alignment - offset);

	reset_memblock_regions();
	memblock_add(r1.base, r1.size);
	memblock_add(r2.base, r2.size);
	memblock_trim_memory(alignment);

	ASSERT_EQ(rgn1->base, r1.base);
	ASSERT_EQ(rgn1->size, r1.size);

	ASSERT_EQ(rgn2->base, r2.base);
	ASSERT_EQ(rgn2->size, new_r2_size);

	ASSERT_EQ(memblock.memory.cnt, 2);

	test_pass_pop();

	return 0;
}

static int memblock_trim_memory_checks(void)
{
	prefix_reset();
	prefix_push(FUNC_TRIM);
	test_print("Running %s tests...\n", FUNC_TRIM);

	memblock_trim_memory_aligned_check();
	memblock_trim_memory_too_small_check();
	memblock_trim_memory_unaligned_base_check();
	memblock_trim_memory_unaligned_end_check();

	prefix_pop();

	return 0;
}

/*
 * A test that checks memblock_overlaps_region() against a single memory
 * region at [1G, 1G + 4M): ranges that fall entirely outside or merely
 * border the region must not report an overlap, while any range sharing
 * at least one byte with it must.
 */
static int memblock_overlaps_region_check(void)
{
	struct region r = {
		.base = SZ_1G,
		.size = SZ_4M
	};

	PREFIX_PUSH();

	reset_memblock_regions();
	memblock_add(r.base, r.size);

	/* Far Away */
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1M, SZ_1M));
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_2G, SZ_1M));

	/* Neighbor */
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_1M));
	ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_4M, SZ_1M));

	/* Partial Overlap */
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_2M));
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_2M, SZ_2M));

	/* Total Overlap */
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G, SZ_4M));
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_2M, SZ_8M));
	ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_1M, SZ_1M));

	test_pass_pop();

	return 0;
}

static int memblock_overlaps_region_checks(void)
{
	prefix_reset();
	prefix_push("memblock_overlaps_region");
	test_print("Running memblock_overlaps_region tests...\n");

	memblock_overlaps_region_check();

	prefix_pop();

	return 0;
}

int memblock_basic_checks(void)
{
	memblock_initialization_check();
	memblock_add_checks();
	memblock_reserve_checks();
	memblock_remove_checks();
	memblock_free_checks();
	memblock_bottom_up_checks();
	memblock_trim_memory_checks();
	memblock_overlaps_region_checks();

	return 0;
}
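
/*
 * How this entry point is typically driven (a sketch, assuming the
 * memblock test harness layout under tools/testing/memblock; details may
 * differ): the harness's main() chains the basic checks with the
 * allocation suites, roughly:
 *
 *	int main(int argc, char **argv)
 *	{
 *		parse_args(argc, argv);
 *		memblock_basic_checks();
 *		memblock_alloc_checks();
 *		...
 *		return 0;
 *	}
 */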