Author | Tokens | Token Proportion | Commits | Commit Proportion |
---|---|---|---|---|
Karolina Drobnik | 5211 | 55.39% | 5 | 31.25% |
Rebecca Mckeever | 4126 | 43.86% | 10 | 62.50% |
Claudio Migliorelli | 70 | 0.74% | 1 | 6.25% |
Total | 9407 | 100.00% | 16 | 100.00% |
// SPDX-License-Identifier: GPL-2.0-or-later
#include "alloc_nid_api.h"

static int alloc_nid_test_flags = TEST_F_NONE;

/*
 * contains the fraction of MEM_SIZE contained in each node in basis point
 * units (one hundredth of 1% or 1/10000)
 */
static const unsigned int node_fractions[] = {
	2500, /* 1/4  */
	 625, /* 1/16 */
	1250, /* 1/8  */
	1250, /* 1/8  */
	 625, /* 1/16 */
	 625, /* 1/16 */
	2500, /* 1/4  */
	 625, /* 1/16 */
};

static inline const char * const get_memblock_alloc_nid_name(int flags)
{
	if (flags & TEST_F_EXACT)
		return "memblock_alloc_exact_nid_raw";
	if (flags & TEST_F_RAW)
		return "memblock_alloc_try_nid_raw";
	return "memblock_alloc_try_nid";
}

static inline void *run_memblock_alloc_nid(phys_addr_t size,
					   phys_addr_t align,
					   phys_addr_t min_addr,
					   phys_addr_t max_addr, int nid)
{
	assert(!(alloc_nid_test_flags & TEST_F_EXACT) ||
	       (alloc_nid_test_flags & TEST_F_RAW));
	/*
	 * TEST_F_EXACT should be checked before TEST_F_RAW since
	 * memblock_alloc_exact_nid_raw() performs raw allocations.
	 */
	if (alloc_nid_test_flags & TEST_F_EXACT)
		return memblock_alloc_exact_nid_raw(size, align, min_addr,
						    max_addr, nid);
	if (alloc_nid_test_flags & TEST_F_RAW)
		return memblock_alloc_try_nid_raw(size, align, min_addr,
						  max_addr, nid);
	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
}

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *        +                   +
 *   |    +       +-----------+      |
 *   |    |       |    rgn    |      |
 *   +----+-------+-----------+------+
 *        ^                   ^
 *        |                   |
 *        min_addr            max_addr
 *
 * Expect to allocate a region that ends at max_addr.
 */
static int alloc_nid_top_down_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
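/*
 * Illustrative sketch, not part of the suite: the basis-point fractions
 * declared in node_fractions above are expected to cover MEM_SIZE exactly,
 * i.e. sum to 10000. The helper below is hypothetical and only shows the
 * arithmetic; it assumes ARRAY_SIZE is available and that MEM_SIZE is a
 * multiple of 16 so each per-node size divides out evenly.
 */
static inline void node_fractions_sanity_sketch(void)
{
	phys_addr_t total = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(node_fractions); i++) {
		/* e.g. 2500 basis points -> MEM_SIZE / 4 */
		total += (MEM_SIZE * node_fractions[i]) / 10000;
	}

	/* 2500 + 625 + 1250 + 1250 + 625 + 625 + 2500 + 625 == 10000 */
	assert(total == MEM_SIZE);
}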
/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the end address is misaligned:
 *
 *         +       +            +
 *  |      +       +---------+  +    |
 *  |      |       |   rgn   |  |    |
 *  +------+-------+---------+--+----+
 *         ^                 ^  ^
 *         |                 |  |
 *         min_addr          |  max_addr
 *                           |
 *                           Aligned address
 *                           boundary
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_top_down_end_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512 + misalign;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size - misalign);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region, which spans over the
 * min_addr and max_addr range:
 *
 *          +               +
 *   |      +---------------+      |
 *   |      |      rgn      |      |
 *   +------+---------------+------+
 *          ^               ^
 *          |               |
 *          min_addr        max_addr
 *
 * Expect to allocate a region that starts at min_addr and ends at
 * max_addr, given that min_addr is aligned.
 */
static int alloc_nid_exact_address_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_EQ(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range:
 *
 *            +          +     +
 *   |        +----------+-----+    |
 *   |        |    rgn   +     |    |
 *   +--------+----------+-----+----+
 *            ^          ^     ^
 *            |          |     |
 *            Aligned    |     max_addr
 *            address    |
 *            boundary   min_addr
 *
 * Expect to drop the lower limit and allocate a memory region which
 * ends at max_addr (if the address is aligned).
 */
static int alloc_nid_top_down_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, max_addr - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
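/*
 * Sketch of the candidate placement the tests above expect (an assumption
 * about the allocator's behavior, not its actual code): top-down rounds
 * the end of the window down to the alignment, bottom-up rounds the start
 * of the window up. Assumes align is a power of two.
 */
static inline phys_addr_t candidate_base_sketch(phys_addr_t size,
						phys_addr_t align,
						phys_addr_t min_addr,
						phys_addr_t max_addr,
						bool bottom_up)
{
	if (bottom_up)
		return (min_addr + align - 1) & ~(align - 1); /* round up */

	return (max_addr - size) & ~(align - 1);	      /* round down */
}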
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range, with the latter being too close to the
 * beginning of the available memory:
 *
 *   +-------------+
 *   |     new     |
 *   +-------------+
 *         +       +
 *         |       +              |
 *         |       |              |
 *         +-------+--------------+
 *         ^       ^
 *         |       |
 *         |       max_addr
 *         |
 *         min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_low_max_generic_check(void)
{
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region within min_addr and max_addr
 * range, with min_addr being so close that it's next to an allocated region:
 *
 *           +                        +
 *   |       +--------+---------------|
 *   |       |   r1   |      rgn      |
 *   +-------+--------+---------------+
 *           ^                        ^
 *           |                        |
 *           min_addr                 max_addr
 *
 * Expect a merge of both regions. Only the region size gets updated.
 */
static int alloc_nid_min_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_128;
	phys_addr_t r2_size = SZ_64;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t reserved_base;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM();
	min_addr = max_addr - r2_size;
	reserved_base = min_addr - r1_size;

	memblock_reserve(reserved_base, r1_size);

	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, reserved_base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region within min_addr and max_addr,
 * with max_addr being so close that it's next to an allocated region:
 *
 *              +             +
 *   |          +-------------+--------|
 *   |          |     rgn     |   r1   |
 *   +----------+-------------+--------+
 *              ^             ^
 *              |             |
 *              min_addr      max_addr
 *
 * Expect a merge of regions. Only the region size gets updated.
 */
static int alloc_nid_max_reserved_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t r1_size = SZ_64;
	phys_addr_t r2_size = SZ_128;
	phys_addr_t total_size = r1_size + r2_size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	max_addr = memblock_end_of_DRAM() - r1_size;
	min_addr = max_addr - r2_size;

	memblock_reserve(max_addr, r1_size);

	allocated_ptr = run_memblock_alloc_nid(r2_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r2_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
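/*
 * The two generic checks above lean on memblock coalescing adjacent
 * reserved regions into one. A minimal stand-alone illustration of that
 * merge behavior (a hypothetical helper, mirroring what the asserts
 * above verify):
 */
static inline void adjacent_merge_sketch(void)
{
	phys_addr_t base = memblock_end_of_DRAM() - SZ_256;

	setup_memblock();
	memblock_reserve(base, SZ_128);
	memblock_reserve(base + SZ_128, SZ_128);

	/* Two adjacent ranges are coalesced into a single SZ_256 region. */
	assert(memblock.reserved.cnt == 1);
	assert(memblock.reserved.regions[0].size == SZ_256);
}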
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap big enough
 * to fit a new region:
 *
 *                 +           +
 *   |    +--------+   +-------+------+    |
 *   |    |   r2   |   |  rgn  |  r1  |    |
 *   +----+--------+---+-------+------+----+
 *                 ^           ^
 *                 |           |
 *                 min_addr    max_addr
 *
 * Expect to merge the new region with r1. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_top_down_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size + r3_size);
	ASSERT_EQ(rgn1->base, max_addr - r3_size);

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap of a size
 * equal to the size of the new region:
 *
 *                  +        +
 *   |     +--------+--------+--------+     |
 *   |     |   r2   |   r3   |   r1   |     |
 *   +-----+--------+--------+--------+-----+
 *                  ^        ^
 *                  |        |
 *                  min_addr max_addr
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_nid_reserved_full_merge_generic_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, total_size);
	ASSERT_EQ(rgn->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
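/*
 * Worked example of the reserved-region layout used by the two tests
 * above (the concrete numbers are only for illustration and assume
 * SMP_CACHE_BYTES == 64 and memblock_end_of_DRAM() == 0x20000):
 *
 *	r1.base = 0x20000 - 2 * 64            = 0x1ff80
 *	r2.base = r1.base - (64 + 64 + 128)   = 0x1fe80   (with gap)
 *
 * so the free window [r2.base + r2.size, r1.base) is exactly
 * r3_size + gap_size = 128 bytes wide. Dropping gap_size, as the
 * full-merge test does, makes the window exactly r3_size wide, which is
 * what forces all three regions to coalesce into one.
 */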
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap that can't
 * fit a new region:
 *
 *                         +    +
 *   |  +----------+-------+    +------+   |
 *   |  |    r3    |  r2   |    |  r1  |   |
 *   +--+----------+-------+----+------+---+
 *                         ^    ^
 *                         |    |
 *                         |    max_addr
 *                         |
 *                         min_addr
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size counter gets updated.
 */
static int alloc_nid_top_down_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base - r3_size);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * but it's too narrow and everything else is reserved:
 *
 *            +-----------+
 *            |    new    |
 *            +-----------+
 *                  +     +
 *   |--------------+     +----------|
 *   |      r2      |     |    r1    |
 *   +--------------+-----+----------+
 *                  ^     ^
 *                  |     |
 *                  |     max_addr
 *                  |
 *                  min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_reserved_all_generic_check(void)
{
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES;
	r1.size = SMP_CACHE_BYTES;

	r2.size = MEM_SIZE - (r1.size + gap_size);
	r2.base = memblock_start_of_DRAM();

	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_EQ(allocated_ptr, NULL);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that ends before the end of the memory.
 */
static int alloc_nid_top_down_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_end_of_DRAM() - SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
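/*
 * Sketch of the clamping the "cap" tests around here expect (an assumed
 * behavior of the allocator, shown as a hypothetical helper): limits that
 * fall outside the available memory are capped to it before the candidate
 * search runs, so an over-large max_addr or under-small min_addr still
 * yields a valid in-range region.
 */
static inline void cap_window_sketch(phys_addr_t *min_addr,
				     phys_addr_t *max_addr)
{
	if (*min_addr < memblock_start_of_DRAM())
		*min_addr = memblock_start_of_DRAM();
	if (*max_addr > memblock_end_of_DRAM())
		*max_addr = memblock_end_of_DRAM();
}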
/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region that ends before the end of the memory.
 */
static int alloc_nid_top_down_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() - SZ_256;
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_end_of_DRAM() - size);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range:
 *
 *        +                       +
 *   |    +-----------+           |      |
 *   |    |    rgn    |           |      |
 *   +----+-----------+-----------+------+
 *        ^                       ^
 *        |                       |
 *        min_addr                max_addr
 *
 * Expect to allocate a region that ends before max_addr.
 */
static int alloc_nid_bottom_up_simple_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SMP_CACHE_BYTES * 2;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A simple test that tries to allocate a memory region within min_addr and
 * max_addr range, where the start address is misaligned:
 *
 *         +                     +
 *  |      +   +-----------+     +     |
 *  |      |   |    rgn    |     |     |
 *  +------+---+-----------+-----+-----+
 *         ^   ^----.            ^
 *         |        |            |
 *     min_addr     |            max_addr
 *                  |
 *                  Aligned address
 *                  boundary
 *
 * Expect to allocate an aligned region that ends before max_addr.
 */
static int alloc_nid_bottom_up_start_misaligned_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_128;
	phys_addr_t misalign = SZ_2;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t rgn_end;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + misalign;
	max_addr = min_addr + SZ_512;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);
	rgn_end = rgn->base + rgn->size;

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr + (SMP_CACHE_BYTES - misalign));
	ASSERT_LT(rgn_end, max_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, which can't fit into
 * min_addr and max_addr range:
 *
 *                     +    +
 *   |---------+       |    |      |
 *   |   rgn   |       |    |      |
 *   +---------+-------+----+------+
 *                     ^    ^
 *                     |    |
 *                     |    max_addr
 *                     |
 *                     min_addr
 *
 * Expect to drop the lower limit and allocate a memory region which
 * starts at the beginning of the available memory.
 */
static int alloc_nid_bottom_up_narrow_range_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_512;
	max_addr = min_addr + SMP_CACHE_BYTES;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap big enough
 * to fit a new region:
 *
 *                 +           +
 *   |    +--------+-------+   +------+   |
 *   |    |   r2   |  rgn  |   |  r1  |   |
 *   +----+--------+-------+---+------+---+
 *                 ^           ^
 *                 |           |
 *                 min_addr    max_addr
 *
 * Expect to merge the new region with r2. The second region does not get
 * updated. The total size field gets updated.
 */
static int alloc_nid_bottom_up_reserved_with_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[1];
	struct memblock_region *rgn2 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_64;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r3_size + gap_size + r2.size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, max_addr);

	ASSERT_EQ(rgn2->size, r2.size + r3_size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range,
 * when there are two reserved regions at the borders, with a gap of a size
 * equal to the size of the new region:
 *
 *                          +    +
 *   |----------+    +------+    +----+   |
 *   |    r3    |    |  r2  |    | r1 |   |
 *   +----------+----+------+----+----+---+
 *                          ^    ^
 *                          |    |
 *                          |    max_addr
 *                          |
 *                          min_addr
 *
 * Expect to drop the lower limit and allocate memory at the beginning of the
 * available memory. The region counter and total size fields get updated.
 * Other regions are not modified.
 */
static int alloc_nid_bottom_up_reserved_no_space_check(void)
{
	struct memblock_region *rgn1 = &memblock.reserved.regions[2];
	struct memblock_region *rgn2 = &memblock.reserved.regions[1];
	struct memblock_region *rgn3 = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	struct region r1, r2;
	phys_addr_t r3_size = SZ_256;
	phys_addr_t gap_size = SMP_CACHE_BYTES;
	phys_addr_t total_size;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_memblock();

	r1.base = memblock_end_of_DRAM() - SMP_CACHE_BYTES * 2;
	r1.size = SMP_CACHE_BYTES;

	r2.size = SZ_128;
	r2.base = r1.base - (r2.size + gap_size);

	total_size = r1.size + r2.size + r3_size;
	min_addr = r2.base + r2.size;
	max_addr = r1.base;

	memblock_reserve(r1.base, r1.size);
	memblock_reserve(r2.base, r2.size);

	allocated_ptr = run_memblock_alloc_nid(r3_size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, r3_size, alloc_nid_test_flags);

	ASSERT_EQ(rgn3->size, r3_size);
	ASSERT_EQ(rgn3->base, memblock_start_of_DRAM());

	ASSERT_EQ(rgn2->size, r2.size);
	ASSERT_EQ(rgn2->base, r2.base);

	ASSERT_EQ(rgn1->size, r1.size);
	ASSERT_EQ(rgn1->base, r1.base);

	ASSERT_EQ(memblock.reserved.cnt, 3);
	ASSERT_EQ(memblock.reserved.total_size, total_size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region, where max_addr is
 * bigger than the end address of the available memory. Expect to allocate
 * a region that starts at the min_addr.
 */
static int alloc_nid_bottom_up_cap_max_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_256;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM() + SZ_1K;
	max_addr = memblock_end_of_DRAM() + SZ_256;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, min_addr);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region, where min_addr is
 * smaller than the start address of the available memory. Expect to allocate
 * a region at the beginning of the available memory.
 */
static int alloc_nid_bottom_up_cap_min_check(void)
{
	struct memblock_region *rgn = &memblock.reserved.regions[0];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_1K;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_memblock();

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM() - SZ_256;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr,
					       NUMA_NO_NODE);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(rgn->size, size);
	ASSERT_EQ(rgn->base, memblock_start_of_DRAM());

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/* Test case wrappers for range tests */
static int alloc_nid_simple_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_simple_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_simple_check();

	return 0;
}

static int alloc_nid_misaligned_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_end_misaligned_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_start_misaligned_check();

	return 0;
}

static int alloc_nid_narrow_range_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_narrow_range_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_narrow_range_check();

	return 0;
}

static int alloc_nid_reserved_with_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_reserved_with_space_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_reserved_with_space_check();

	return 0;
}

static int alloc_nid_reserved_no_space_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_reserved_no_space_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_reserved_no_space_check();

	return 0;
}

static int alloc_nid_cap_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_cap_max_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_cap_max_check();

	return 0;
}

static int alloc_nid_cap_min_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	memblock_set_bottom_up(false);
	alloc_nid_top_down_cap_min_check();
	memblock_set_bottom_up(true);
	alloc_nid_bottom_up_cap_min_check();

	return 0;
}

static int alloc_nid_min_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_min_reserved_generic_check);
	run_bottom_up(alloc_nid_min_reserved_generic_check);

	return 0;
}

static int alloc_nid_max_reserved_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_max_reserved_generic_check);
	run_bottom_up(alloc_nid_max_reserved_generic_check);

	return 0;
}

static int alloc_nid_exact_address_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_exact_address_generic_check);
	run_bottom_up(alloc_nid_exact_address_generic_check);

	return 0;
}

static int alloc_nid_reserved_full_merge_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_reserved_full_merge_generic_check);
	run_bottom_up(alloc_nid_reserved_full_merge_generic_check);

	return 0;
}

static int alloc_nid_reserved_all_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_reserved_all_generic_check);
	run_bottom_up(alloc_nid_reserved_all_generic_check);

	return 0;
}
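/*
 * run_top_down() and run_bottom_up(), used by the wrappers above, come
 * from the suite's common helpers. A minimal sketch of the behavior they
 * are assumed to provide (the real helpers may also push a direction
 * label onto the test prefix):
 */
static inline void run_both_directions_sketch(int (*func)(void))
{
	memblock_set_bottom_up(false);	/* top-down pass */
	func();
	memblock_set_bottom_up(true);	/* bottom-up pass */
	func();
}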
static int alloc_nid_low_max_check(void)
{
	test_print("\tRunning %s...\n", __func__);
	run_top_down(alloc_nid_low_max_generic_check);
	run_bottom_up(alloc_nid_low_max_generic_check);

	return 0;
}

static int memblock_alloc_nid_range_checks(void)
{
	test_print("Running %s range tests...\n",
		   get_memblock_alloc_nid_name(alloc_nid_test_flags));

	alloc_nid_simple_check();
	alloc_nid_misaligned_check();
	alloc_nid_narrow_range_check();
	alloc_nid_reserved_with_space_check();
	alloc_nid_reserved_no_space_check();
	alloc_nid_cap_max_check();
	alloc_nid_cap_min_check();

	alloc_nid_min_reserved_check();
	alloc_nid_max_reserved_check();
	alloc_nid_exact_address_check();
	alloc_nid_reserved_full_merge_check();
	alloc_nid_reserved_all_check();
	alloc_nid_low_max_check();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the end of the requested node.
 */
static int alloc_nid_top_down_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
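/*
 * The NUMA checks in this file call setup_numa_memblock(node_fractions),
 * which is provided by the suite's common code. A sketch of what it is
 * assumed to do: carve the simulated memory into NUMA_NODES consecutive
 * regions whose sizes follow the basis-point fractions declared at the
 * top of this file. The helper below is hypothetical and elides the
 * usual setup_memblock()-style registration that precedes it.
 */
static inline void setup_numa_sketch(const unsigned int *fractions)
{
	phys_addr_t base = memblock_start_of_DRAM();
	int i;

	for (i = 0; i < NUMA_NODES; i++) {
		phys_addr_t size = (MEM_SIZE * fractions[i]) / 10000;

		/* associate [base, base + size) with node i */
		memblock_set_node(base, size, &memblock.memory, i);
		base += size;
	}
}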
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 *  |   +-----+          +------------------+     |
 *  |   | req |          |     expected     |     |
 *  +---+-----+----------+------------------+-----+
 *
 *  |                             +---------+     |
 *  |                             |   rgn   |     |
 *  +-----------------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that has
 * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_small_node_check(void)
{
	int nid_req = 1;
	int nid_exp = 6;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_2 * req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 *  |              +---------+            +------------------+     |
 *  |              |requested|            |     expected     |     |
 *  +--------------+---------+------------+------------------+-----+
 *
 *  |              +---------+                     +---------+     |
 *  |              | reserved|                     |   new   |     |
 *  +--------------+---------+---------------------+---------+-----+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case, nid = 6) after
 * falling back to NUMA_NO_NODE. The region count and total size get updated.
 */
static int alloc_nid_top_down_numa_node_reserved_check(void)
{
	int nid_req = 2;
	int nid_exp = 6;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(req_node->base, req_node->size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

	test_pass_pop();

	return 0;
}
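/*
 * Several NUMA checks in this file expect a fallback: when the requested
 * node cannot satisfy the allocation, the non-exact variants retry with
 * NUMA_NO_NODE and take memory from any node. A condensed sketch of that
 * control flow (an assumption about memblock's internals; the exact-nid
 * variant is the one that skips the retry):
 */
static inline phys_addr_t find_with_fallback_sketch(phys_addr_t size,
						    phys_addr_t align,
						    phys_addr_t start,
						    phys_addr_t end,
						    int nid, bool exact_nid)
{
	phys_addr_t found;

	/* first pass: restrict the search to the requested node */
	found = memblock_find_in_range_node(size, align, start, end,
					    nid, MEMBLOCK_NONE);
	if (!found && !exact_nid && nid != NUMA_NO_NODE)
		/* second pass: any node within [start, end) will do */
		found = memblock_find_in_range_node(size, align, start, end,
						    NUMA_NO_NODE,
						    MEMBLOCK_NONE);
	return found;
}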
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+          |
 *  |           |               requested               |          |
 *  +-----------+---------------------------------------+----------+
 *
 *  |           +------------------+              +-----+          |
 *  |           |     reserved     |              | new |          |
 *  +-----------+------------------+--------------+-----+----------+
 *
 * Expect to allocate an aligned region at the end of the requested node. The
 * region count and total size get updated.
 */
static int alloc_nid_top_down_numa_part_reserved_check(void)
{
	int nid_req = 4;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_8, req_node->size);
	r1.base = req_node->base;
	r1.size = req_node->size / SZ_2;
	size = r1.size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 *  |           +-----------------------+         +----------------------|
 *  |           |       requested       |         |       expected       |
 *  +-----------+-----------------------+---------+----------------------+
 *
 *  |                 +----------+                           +-----------|
 *  |                 | reserved |                           |    new    |
 *  +-----------------+----------+---------------------------+-----------+
 *
 * Expect to allocate an aligned region at the end of the last node that is
 * large enough and has enough unreserved memory (in this case,
 * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count
 * and total size get updated.
 */
static int alloc_nid_top_down_numa_part_reserved_fallback_check(void)
{
	int nid_req = 4;
	int nid_exp = NUMA_NODES - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[1];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	struct region r1;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_2;
	r1.base = req_node->base + (size / SZ_2);
	r1.size = size;

	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(r1.base, r1.size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(exp_node) - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                min_addr
 *                                |           max_addr
 *                                |           |
 *                                v           v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                +           +
 *  |                       +-----------+                          |
 *  |                       |    rgn    |                          |
 *  +-----------------------+-----------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_nid_top_down_numa_split_range_low_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t req_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	req_node_end = region_end(req_node);
	min_addr = req_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node_end - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *                               min_addr
 *                               |                 max_addr
 *                               |                 |
 *                               v                 v
 *  |      +--------------------------+---------+              |
 *  |      |         expected         |requested|              |
 *  +------+--------------------------+---------+--------------+
 *                               +                 +
 *  |                     +---------+                          |
 *  |                     |   rgn   |                          |
 *  +---------------------+---------+--------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that
 * ends at the end of the first node that overlaps with the range.
 */
static int alloc_nid_top_down_numa_split_range_high_check(void)
{
	int nid_req = 3;
	int nid_exp = nid_req - 1;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_512;
	phys_addr_t min_addr;
	phys_addr_t max_addr;
	phys_addr_t exp_node_end;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	exp_node_end = region_end(exp_node);
	min_addr = exp_node_end - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node_end - size);
	ASSERT_LE(exp_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the
 * requested node ends before min_addr:
 *
 *                                          min_addr
 *                                          |         max_addr
 *                                          |         |
 *                                          v         v
 *  |    +---------------+        +-------------+---------+          |
 *  |    |   requested   |        |    node1    |  node2  |          |
 *  +----+---------------+--------+-------------+---------+----------+
 *                                          +         +
 *  |          +---------+                                           |
 *  |          |   rgn   |                                           |
 *  +----------+---------+-------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that ends at
 * the end of the requested node.
 */
static int alloc_nid_top_down_numa_no_overlap_split_check(void)
{
	int nid_req = 2;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *node2 = &memblock.memory.regions[6];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_512;
	min_addr = node2->base - SZ_256;
	max_addr = min_addr + size;

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, region_end(req_node) - size);
	ASSERT_LE(req_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate memory within min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested
 * node ends before min_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *                          min_addr
 *                          |                                 max_addr
 *                          |                                 |
 *                          v                                 v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                          +                                 +
 *  |                                                   +-----+      |
 *  |                                                   | rgn |      |
 *  +---------------------------------------------------+-----+------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_no_overlap_low_check(void)
{
	int nid_req = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate memory within min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested
 * node starts after max_addr. The range overlaps with multiple nodes along
 * node boundaries:
 *
 *        min_addr
 *        |                                 max_addr
 *        |                                 |
 *        v                                 v
 *  |     +----------+----...----+----------+        +-----------+   |
 *  |     | min node |    ...    | max node |        | requested |   |
 *  +-----+----------+----...----+----------+--------+-----------+---+
 *        +                                 +
 *  |                                 +-----+                        |
 *  |                                 | rgn |                        |
 *  +---------------------------------+-----+------------------------+
 *
 * Expect to allocate a memory region at the end of the final node in
 * the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_top_down_numa_no_overlap_high_check(void)
{
	int nid_req = 7;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *min_node = &memblock.memory.regions[2];
	struct memblock_region *max_node = &memblock.memory.regions[5];
	void *allocated_ptr = NULL;
	phys_addr_t size = SZ_64;
	phys_addr_t max_addr;
	phys_addr_t min_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	min_addr = min_node->base;
	max_addr = region_end(max_node);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, max_addr - size);
	ASSERT_LE(max_node->base, new_rgn->base);

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * has enough memory to allocate a region of the requested size.
 * Expect to allocate an aligned region at the beginning of the requested node.
 */
static int alloc_nid_bottom_up_numa_simple_check(void)
{
	int nid_req = 3;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	ASSERT_LE(SZ_4, req_node->size);
	size = req_node->size / SZ_4;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, req_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(req_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size:
 *
 *  |----------------------+-----+                |
 *  |       expected       | req |                |
 *  +----------------------+-----+----------------+
 *
 *  |---------+                                   |
 *  |   rgn   |                                   |
 *  +---------+-----------------------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node that
 * has enough memory (in this case, nid = 0) after falling back to
 * NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_small_node_check(void)
{
	int nid_req = 1;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = SZ_2 * req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(exp_node));

	ASSERT_EQ(memblock.reserved.cnt, 1);
	ASSERT_EQ(memblock.reserved.total_size, size);

	test_pass_pop();

	return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is fully reserved:
 *
 *  |----------------------+     +-----------+                    |
 *  |       expected       |     | requested |                    |
 *  +----------------------+-----+-----------+--------------------+
 *
 *  |-----------+                +-----------+                    |
 *  |    new    |                |  reserved |                    |
 *  +-----------+----------------+-----------+--------------------+
 *
 * Expect to allocate an aligned region at the beginning of the first node that
 * is large enough and has enough unreserved memory (in this case, nid = 0)
 * after falling back to NUMA_NO_NODE. The region count and total size get
 * updated.
 */
static int alloc_nid_bottom_up_numa_node_reserved_check(void)
{
	int nid_req = 2;
	int nid_exp = 0;
	struct memblock_region *new_rgn = &memblock.reserved.regions[0];
	struct memblock_region *req_node = &memblock.memory.regions[nid_req];
	struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
	void *allocated_ptr = NULL;
	phys_addr_t size;
	phys_addr_t min_addr;
	phys_addr_t max_addr;

	PREFIX_PUSH();
	setup_numa_memblock(node_fractions);

	size = req_node->size;
	min_addr = memblock_start_of_DRAM();
	max_addr = memblock_end_of_DRAM();

	memblock_reserve(req_node->base, req_node->size);

	allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
					       min_addr, max_addr, nid_req);

	ASSERT_NE(allocated_ptr, NULL);
	assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

	ASSERT_EQ(new_rgn->size, size);
	ASSERT_EQ(new_rgn->base, exp_node->base);
	ASSERT_LE(region_end(new_rgn), region_end(exp_node));

	ASSERT_EQ(memblock.reserved.cnt, 2);
	ASSERT_EQ(memblock.reserved.total_size, size + req_node->size);

	test_pass_pop();

	return 0;
}
/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved but has enough memory for the allocated region:
 *
 *  |           +---------------------------------------+         |
 *  |           |               requested               |         |
 *  +-----------+---------------------------------------+---------+
 *
 *  |           +------------------+-----+                        |
 *  |           |     reserved     | new |                        |
 *  +-----------+------------------+-----+------------------------+
 *
 * Expect to allocate an aligned region in the requested node that merges with
 * the existing reserved region. The total size gets updated.
 */
static int alloc_nid_bottom_up_numa_part_reserved_check(void)
{
        int nid_req = 4;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        struct region r1;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t total_size;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        ASSERT_LE(SZ_8, req_node->size);
        r1.base = req_node->base;
        r1.size = req_node->size / SZ_2;
        size = r1.size / SZ_4;
        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();
        total_size = size + r1.size;

        memblock_reserve(r1.base, r1.size);
        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, total_size);
        ASSERT_EQ(new_rgn->base, req_node->base);
        ASSERT_LE(region_end(new_rgn), region_end(req_node));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * is partially reserved and does not have enough contiguous memory for the
 * allocated region:
 *
 *  |----------------------+       +-----------------------+         |
 *  |       expected       |       |       requested       |         |
 *  +----------------------+-------+-----------------------+---------+
 *
 *  |-----------+                        +----------+                |
 *  |    new    |                        | reserved |                |
 *  +-----------+------------------------+----------+----------------+
 *
 * Expect to allocate an aligned region at the beginning of the first
 * node that is large enough and has enough unreserved memory (in this case,
 * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size
 * get updated.
 */
static int alloc_nid_bottom_up_numa_part_reserved_fallback_check(void)
{
        int nid_req = 4;
        int nid_exp = 0;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        struct region r1;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        ASSERT_LE(SZ_4, req_node->size);
        size = req_node->size / SZ_2;
        r1.base = req_node->base + (size / SZ_2);
        r1.size = size;

        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        memblock_reserve(r1.base, r1.size);
        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, exp_node->base);
        ASSERT_LE(region_end(new_rgn), region_end(exp_node));

        ASSERT_EQ(memblock.reserved.cnt, 2);
        ASSERT_EQ(memblock.reserved.total_size, size + r1.size);

        test_pass_pop();

        return 0;
}
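/*
 * The geometry of the part_reserved_fallback test above, spelled out with
 * S = req_node->size:
 *
 *	size    = S / 2;		// requested region
 *	r1.base = req_node->base + S / 4;
 *	r1.size = S / 2;		// reserved block covers [S/4, 3*S/4)
 *
 * That leaves two free chunks of S / 4 each at either end of the node.
 * Neither chunk can hold the S / 2 allocation, so the allocator falls back
 * to NUMA_NO_NODE and places the region in node 0 instead.
 */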
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node:
 *
 *                                min_addr
 *                                |         max_addr
 *                                |         |
 *                                v         v
 *  |           +-----------------------+-----------+              |
 *  |           |       requested       |   node3   |              |
 *  +-----------+-----------------------+-----------+--------------+
 *                                +         +
 *  |           +-----------+                                      |
 *  |           |    rgn    |                                      |
 *  +-----------+-----------+--------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the requested node.
 */
static int alloc_nid_bottom_up_numa_split_range_low_check(void)
{
        int nid_req = 2;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_512;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t req_node_end;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        req_node_end = region_end(req_node);
        min_addr = req_node_end - SZ_256;
        max_addr = min_addr + size;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, req_node->base);
        ASSERT_LE(region_end(new_rgn), req_node_end);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node:
 *
 *                                               min_addr
 *                                               |     max_addr
 *                                               |     |
 *                                               v     v
 *  |------------------+        +----------------------+---------+      |
 *  |     expected     |        |       previous       |requested|      |
 *  +------------------+--------+----------------------+---------+------+
 *                                               +     +
 *  |---------+                                                         |
 *  |   rgn   |                                                         |
 *  +---------+---------------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region at the beginning
 * of the first node that has enough memory.
 */
static int alloc_nid_bottom_up_numa_split_range_high_check(void)
{
        int nid_req = 3;
        int nid_exp = 0;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_512;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t exp_node_end;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        exp_node_end = region_end(exp_node);
        min_addr = req_node->base - SZ_256;
        max_addr = min_addr + size;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, exp_node->base);
        ASSERT_LE(region_end(new_rgn), exp_node_end);

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
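/*
 * In both split_range tests above, the [min_addr, max_addr) window straddles
 * a node boundary by SZ_256 on each side:
 *
 *	min_addr = boundary - SZ_256;
 *	max_addr = min_addr + SZ_512;	// i.e. boundary + SZ_256
 *
 * Only SZ_256 of the window lies inside any single node, so a SZ_512 region
 * cannot fit within the window, and the allocator is expected to drop the
 * lower limit and satisfy the request below min_addr instead.
 */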
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the
 * requested node ends before min_addr:
 *
 *                                       min_addr
 *                                       |     max_addr
 *                                       |     |
 *                                       v     v
 *  |    +---------------+        +-------------+---------+         |
 *  |    |   requested   |        |    node1    |  node2  |         |
 *  +----+---------------+--------+-------------+---------+---------+
 *                                       +     +
 *  |    +---------+                                                |
 *  |    |   rgn   |                                                |
 *  +----+---------+------------------------------------------------+
 *
 * Expect to drop the lower limit and allocate a memory region that starts at
 * the beginning of the requested node.
 */
static int alloc_nid_bottom_up_numa_no_overlap_split_check(void)
{
        int nid_req = 2;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *node2 = &memblock.memory.regions[6];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        size = SZ_512;
        min_addr = node2->base - SZ_256;
        max_addr = min_addr + size;

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, req_node->base);
        ASSERT_LE(region_end(new_rgn), region_end(req_node));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * ends before min_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *                          min_addr
 *                          |                                 max_addr
 *                          |                                 |
 *                          v                                 v
 *  |-----------+           +----------+----...----+----------+      |
 *  | requested |           | min node |    ...    | max node |      |
 *  +-----------+-----------+----------+----...----+----------+------+
 *                          +                                 +
 *  |                       +-----+                                  |
 *  |                       | rgn |                                  |
 *  +-----------------------+-----+----------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_no_overlap_low_check(void)
{
        int nid_req = 0;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *min_node = &memblock.memory.regions[2];
        struct memblock_region *max_node = &memblock.memory.regions[5];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_64;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        min_addr = min_node->base;
        max_addr = region_end(max_node);

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, min_addr);
        ASSERT_LE(region_end(new_rgn), region_end(min_node));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}
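/*
 * In the no_overlap_split test above, the requested node lies entirely below
 * min_addr, so the address limits cannot be honoured together with the node
 * hint. The allocator is expected to retry with the lower limit dropped,
 * which lets the bottom-up pass land at req_node->base, as the assertions
 * check.
 */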
/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when the requested node and the range do not overlap, and the requested node
 * starts after max_addr. The range overlaps with multiple nodes along node
 * boundaries:
 *
 *        min_addr
 *        |                                 max_addr
 *        |                                 |
 *        v                                 v
 *  |     +----------+----...----+----------+        +---------+   |
 *  |     | min node |    ...    | max node |        |requested|   |
 *  +-----+----------+----...----+----------+--------+---------+---+
 *        +                                 +
 *  |     +-----+                                                  |
 *  |     | rgn |                                                  |
 *  +-----+-----+--------------------------------------------------+
 *
 * Expect to allocate a memory region at the beginning of the first node
 * in the range after falling back to NUMA_NO_NODE.
 */
static int alloc_nid_bottom_up_numa_no_overlap_high_check(void)
{
        int nid_req = 7;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *min_node = &memblock.memory.regions[2];
        struct memblock_region *max_node = &memblock.memory.regions[5];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_64;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        min_addr = min_node->base;
        max_addr = region_end(max_node);

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, size);
        ASSERT_EQ(new_rgn->base, min_addr);
        ASSERT_LE(region_end(new_rgn), region_end(min_node));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to allocate a memory region in a specific NUMA node that
 * does not have enough memory to allocate a region of the requested size.
 * Additionally, none of the nodes have enough memory to allocate the region:
 *
 *                +-----------------------------------+
 *                |                new                |
 *                +-----------------------------------+
 *  |-------+-------+-------+-------+-------+-------+-------+-------|
 *  | node0 | node1 | node2 | node3 | node4 | node5 | node6 | node7 |
 *  +-------+-------+-------+-------+-------+-------+-------+-------+
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_numa_large_region_generic_check(void)
{
        int nid_req = 3;
        void *allocated_ptr = NULL;
        phys_addr_t size = MEM_SIZE / SZ_2;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        min_addr = memblock_start_of_DRAM();
        max_addr = memblock_end_of_DRAM();

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_EQ(allocated_ptr, NULL);

        test_pass_pop();

        return 0;
}
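/*
 * The no_overlap fallback tests pick opposite ends of the allowed range
 * depending on the allocation direction. A compact summary of the bases
 * asserted above and in the top-down variant at the start of this section:
 *
 *	// top-down:  end of the range, i.e. the top of the max node
 *	ASSERT_EQ(new_rgn->base, max_addr - size);
 *	// bottom-up: start of the range, i.e. the base of the min node
 *	ASSERT_EQ(new_rgn->base, min_addr);
 */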
/*
 * A test that tries to allocate memory within the min_addr and max_addr range
 * when there are two reserved regions at the borders. The requested node
 * starts at min_addr and ends at max_addr and is the same size as the region
 * to be allocated:
 *
 *                     min_addr
 *                     |                       max_addr
 *                     |                       |
 *                     v                       v
 *  |      +-----------+-----------------------+-----------------------|
 *  |      |   node5   |       requested       |         node7         |
 *  +------+-----------+-----------------------+-----------------------+
 *                     +                       +
 *  |             +----+-----------------------+----+                  |
 *  |             | r2 |          new          | r1 |                  |
 *  +-------------+----+-----------------------+----+------------------+
 *
 * Expect to merge all of the regions into one. The region counter and total
 * size fields get updated.
 */
static int alloc_nid_numa_reserved_full_merge_generic_check(void)
{
        int nid_req = 6;
        int nid_next = nid_req + 1;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *next_node = &memblock.memory.regions[nid_next];
        void *allocated_ptr = NULL;
        struct region r1, r2;
        phys_addr_t size = req_node->size;
        phys_addr_t total_size;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        r1.base = next_node->base;
        r1.size = SZ_128;

        r2.size = SZ_128;
        r2.base = r1.base - (size + r2.size);

        total_size = r1.size + r2.size + size;
        min_addr = r2.base + r2.size;
        max_addr = r1.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
        assert_mem_content(allocated_ptr, size, alloc_nid_test_flags);

        ASSERT_EQ(new_rgn->size, total_size);
        ASSERT_EQ(new_rgn->base, r2.base);

        ASSERT_LE(new_rgn->base, req_node->base);
        ASSERT_LE(region_end(req_node), region_end(new_rgn));

        ASSERT_EQ(memblock.reserved.cnt, 1);
        ASSERT_EQ(memblock.reserved.total_size, total_size);

        test_pass_pop();

        return 0;
}

/*
 * A test that tries to allocate memory within the min_addr and max_addr range,
 * where the total range can fit the region, but it is split between two nodes
 * and everything else is reserved. Additionally, nid is set to NUMA_NO_NODE
 * instead of requesting a specific node:
 *
 *                         +-----------+
 *                         |    new    |
 *                         +-----------+
 *  |      +---------------------+-----------|
 *  |      |      prev node      | next node |
 *  +------+---------------------+-----------+
 *                         +           +
 *  |----------------------+           +-----|
 *  |          r1          |           |  r2 |
 *  +----------------------+-----------+-----+
 *                         ^           ^
 *                         |           |
 *                         |           max_addr
 *                         |
 *                         min_addr
 *
 * Expect no allocation to happen.
 */
static int alloc_nid_numa_split_all_reserved_generic_check(void)
{
        void *allocated_ptr = NULL;
        struct memblock_region *next_node = &memblock.memory.regions[7];
        struct region r1, r2;
        phys_addr_t size = SZ_256;
        phys_addr_t max_addr;
        phys_addr_t min_addr;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        r2.base = next_node->base + SZ_128;
        r2.size = memblock_end_of_DRAM() - r2.base;

        r1.size = MEM_SIZE - (r2.size + size);
        r1.base = memblock_start_of_DRAM();

        min_addr = r1.base + r1.size;
        max_addr = r2.base;

        memblock_reserve(r1.base, r1.size);
        memblock_reserve(r2.base, r2.size);

        allocated_ptr = run_memblock_alloc_nid(size, SMP_CACHE_BYTES,
                                               min_addr, max_addr,
                                               NUMA_NO_NODE);

        ASSERT_EQ(allocated_ptr, NULL);

        test_pass_pop();

        return 0;
}
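/*
 * In the split_all_reserved test above, r1 and r2 are sized so that exactly
 * `size` bytes remain free between them, yet the allocation must fail even
 * with NUMA_NO_NODE: memblock walks free ranges per memory region, so a gap
 * that straddles the prev node/next node boundary is seen as two candidates,
 * each smaller than `size`. A sketch of the sizing used above:
 *
 *	r2.size = memblock_end_of_DRAM() - r2.base;
 *	r1.size = MEM_SIZE - (r2.size + size);	// free gap == size, but
 *						// split across the boundary
 */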
/*
 * A simple test that tries to allocate a memory region through
 * memblock_alloc_node() on a NUMA node with id `nid`. Expect the correct NUMA
 * node to be set for the new region.
 */
static int alloc_node_on_correct_nid(void)
{
        int nid_req = 2;
        void *allocated_ptr = NULL;
#ifdef CONFIG_NUMA
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
#endif
        phys_addr_t size = SZ_512;

        PREFIX_PUSH();
        setup_numa_memblock(node_fractions);

        allocated_ptr = memblock_alloc_node(size, SMP_CACHE_BYTES, nid_req);

        ASSERT_NE(allocated_ptr, NULL);
#ifdef CONFIG_NUMA
        ASSERT_EQ(nid_req, req_node->nid);
#endif

        test_pass_pop();

        return 0;
}

/* Test case wrappers for NUMA tests */
static int alloc_nid_numa_simple_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_simple_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_simple_check();

        return 0;
}

static int alloc_nid_numa_small_node_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_small_node_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_small_node_check();

        return 0;
}

static int alloc_nid_numa_node_reserved_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_node_reserved_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_node_reserved_check();

        return 0;
}

static int alloc_nid_numa_part_reserved_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_part_reserved_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_part_reserved_check();

        return 0;
}

static int alloc_nid_numa_part_reserved_fallback_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_part_reserved_fallback_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_part_reserved_fallback_check();

        return 0;
}

static int alloc_nid_numa_split_range_low_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_split_range_low_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_split_range_low_check();

        return 0;
}

static int alloc_nid_numa_split_range_high_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_split_range_high_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_split_range_high_check();

        return 0;
}

static int alloc_nid_numa_no_overlap_split_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_no_overlap_split_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_no_overlap_split_check();

        return 0;
}

static int alloc_nid_numa_no_overlap_low_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_no_overlap_low_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_no_overlap_low_check();

        return 0;
}

static int alloc_nid_numa_no_overlap_high_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        memblock_set_bottom_up(false);
        alloc_nid_top_down_numa_no_overlap_high_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_numa_no_overlap_high_check();

        return 0;
}

static int alloc_nid_numa_large_region_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_numa_large_region_generic_check);
        run_bottom_up(alloc_nid_numa_large_region_generic_check);

        return 0;
}
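/*
 * The remaining wrappers drive the generic checks through run_top_down() and
 * run_bottom_up() instead of open-coded memblock_set_bottom_up() calls. A
 * minimal sketch of what these helpers from the tests' common header
 * presumably do:
 *
 *	static inline void run_top_down(int (*func)(void))
 *	{
 *		memblock_set_bottom_up(false);
 *		prefix_push("top-down");
 *		func();
 *		prefix_pop();
 *	}
 *
 * with run_bottom_up() mirroring it for the bottom-up direction.
 */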
static int alloc_nid_numa_reserved_full_merge_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_numa_reserved_full_merge_generic_check);
        run_bottom_up(alloc_nid_numa_reserved_full_merge_generic_check);

        return 0;
}

static int alloc_nid_numa_split_all_reserved_check(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_nid_numa_split_all_reserved_generic_check);
        run_bottom_up(alloc_nid_numa_split_all_reserved_generic_check);

        return 0;
}

static int alloc_node_numa_on_correct_nid(void)
{
        test_print("\tRunning %s...\n", __func__);
        run_top_down(alloc_node_on_correct_nid);
        run_bottom_up(alloc_node_on_correct_nid);

        return 0;
}

int __memblock_alloc_nid_numa_checks(void)
{
        test_print("Running %s NUMA tests...\n",
                   get_memblock_alloc_nid_name(alloc_nid_test_flags));

        alloc_nid_numa_simple_check();
        alloc_nid_numa_small_node_check();
        alloc_nid_numa_node_reserved_check();
        alloc_nid_numa_part_reserved_check();
        alloc_nid_numa_part_reserved_fallback_check();
        alloc_nid_numa_split_range_low_check();
        alloc_nid_numa_split_range_high_check();

        alloc_nid_numa_no_overlap_split_check();
        alloc_nid_numa_no_overlap_low_check();
        alloc_nid_numa_no_overlap_high_check();
        alloc_nid_numa_large_region_check();
        alloc_nid_numa_reserved_full_merge_check();
        alloc_nid_numa_split_all_reserved_check();

        alloc_node_numa_on_correct_nid();

        return 0;
}

static int memblock_alloc_nid_checks_internal(int flags)
{
        alloc_nid_test_flags = flags;

        prefix_reset();
        prefix_push(get_memblock_alloc_nid_name(flags));

        reset_memblock_attributes();
        dummy_physical_memory_init();

        memblock_alloc_nid_range_checks();
        memblock_alloc_nid_numa_checks();

        dummy_physical_memory_cleanup();

        prefix_pop();

        return 0;
}

int memblock_alloc_nid_checks(void)
{
        memblock_alloc_nid_checks_internal(TEST_F_NONE);
        memblock_alloc_nid_checks_internal(TEST_F_RAW);

        return 0;
}

int memblock_alloc_exact_nid_range_checks(void)
{
        alloc_nid_test_flags = (TEST_F_RAW | TEST_F_EXACT);

        memblock_alloc_nid_range_checks();

        return 0;
}
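/*
 * The flag combinations set above select which allocator the tests exercise.
 * run_memblock_alloc_nid() is defined earlier in this file; a minimal sketch
 * of the dispatch it presumably performs:
 *
 *	if (alloc_nid_test_flags & TEST_F_EXACT)
 *		return memblock_alloc_exact_nid_raw(size, align,
 *						    min_addr, max_addr, nid);
 *	if (alloc_nid_test_flags & TEST_F_RAW)
 *		return memblock_alloc_try_nid_raw(size, align,
 *						  min_addr, max_addr, nid);
 *	return memblock_alloc_try_nid(size, align, min_addr, max_addr, nid);
 *
 * TEST_F_RAW also changes what assert_mem_content() accepts: the raw
 * variants do not zero the allocation, while the default variant must
 * return zero-filled memory.
 */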