Contributors: 7
Author             Tokens   Token Proportion   Commits   Commit Proportion
Jason Gunthorpe      9249             60.85%        10              25.00%
Nicolin Chen         3493             22.98%        14              35.00%
Steve Sistare        1110              7.30%         2               5.00%
Joao Martins         1083              7.12%        11              27.50%
Yi L Liu              157              1.03%         1               2.50%
Lu Baolu              107              0.70%         1               2.50%
GuokaiXu                1              0.01%         1               2.50%
Total               15200                            40

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#include <asm/unistd.h>
#include <stdlib.h>
#include <sys/capability.h>
#include <sys/mman.h>
#include <sys/eventfd.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static unsigned long HUGEPAGE_SIZE;

#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
#define MOCK_HUGE_PAGE_SIZE (512 * MOCK_PAGE_SIZE)

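/*
 * Read the THP PMD size from sysfs; fall back to 2MiB if the file is missing
 * or can't be parsed.
 */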
static unsigned long get_huge_page_size(void)
{
	char buf[80];
	int ret;
	int fd;

	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
		  O_RDONLY);
	if (fd < 0)
		return 2 * 1024 * 1024;

	ret = read(fd, buf, sizeof(buf));
	close(fd);
	if (ret <= 0 || ret == sizeof(buf))
		return 2 * 1024 * 1024;
	buf[ret] = 0;
	return strtoul(buf, NULL, 10);
}

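/*
 * Runs before main(): discover PAGE_SIZE and HUGEPAGE_SIZE, then set up the
 * shared test buffers (a hugepage-aligned anonymous mapping and a memfd-backed
 * mapping) that the tests below map into IOAS objects.
 */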
static __attribute__((constructor)) void setup_sizes(void)
{
	void *vrc;
	int rc;

	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
	HUGEPAGE_SIZE = get_huge_page_size();

	BUFFER_SIZE = PAGE_SIZE * 16;
	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
	assert(!rc);
	assert(buffer);
	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(vrc == buffer);

	mfd_buffer = memfd_mmap(BUFFER_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
				&mfd);
}

FIXTURE(iommufd)
{
	int fd;
};

FIXTURE_SETUP(iommufd)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
}

FIXTURE_TEARDOWN(iommufd)
{
	teardown_iommufd(self->fd, _metadata);
}

TEST_F(iommufd, simple_close)
{
}

TEST_F(iommufd, cmd_fail)
{
	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };

	/* object id is invalid */
	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
	/* Bad pointer */
	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
	/* Unknown ioctl */
	EXPECT_ERRNO(ENOTTY,
		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
			   &cmd));
}

TEST_F(iommufd, cmd_length)
{
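	/*
	 * Probe each ioctl's size handling three ways: one byte short of the
	 * minimum legal size must fail with EINVAL, an oversized command with
	 * a non-zero trailing byte must fail with E2BIG, and an oversized
	 * command whose trailing byte is zero must behave exactly like the
	 * exact-size call.
	 */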
#define TEST_LENGTH(_struct, _ioctl, _last)                              \
	{                                                                \
		size_t min_size = offsetofend(struct _struct, _last);    \
		struct {                                                 \
			struct _struct cmd;                              \
			uint8_t extra;                                   \
		} cmd = { .cmd = { .size = min_size - 1 },               \
			  .extra = UINT8_MAX };                          \
		int old_errno;                                           \
		int rc;                                                  \
									 \
		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
		cmd.cmd.size = sizeof(struct _struct);                   \
		rc = ioctl(self->fd, _ioctl, &cmd);                      \
		old_errno = errno;                                       \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		cmd.extra = 0;                                           \
		if (rc) {                                                \
			EXPECT_ERRNO(old_errno,                          \
				     ioctl(self->fd, _ioctl, &cmd));     \
		} else {                                                 \
			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
		}                                                        \
	}

	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
		    out_iova_alignment);
	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
		    allowed_iovas);
	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
	TEST_LENGTH(iommu_ioas_map_file, IOMMU_IOAS_MAP_FILE, iova);
	TEST_LENGTH(iommu_viommu_alloc, IOMMU_VIOMMU_ALLOC, out_viommu_id);
	TEST_LENGTH(iommu_vdevice_alloc, IOMMU_VDEVICE_ALLOC, virt_id);
	TEST_LENGTH(iommu_ioas_change_process, IOMMU_IOAS_CHANGE_PROCESS,
		    __reserved);
#undef TEST_LENGTH
}

TEST_F(iommufd, cmd_ex_fail)
{
	struct {
		struct iommu_destroy cmd;
		__u64 future;
	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };

	/* object id is invalid and command is longer */
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* future area is non-zero */
	cmd.future = 1;
	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Original command "works" */
	cmd.cmd.size = sizeof(cmd.cmd);
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Short command fails */
	cmd.cmd.size = sizeof(cmd.cmd) - 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
}

TEST_F(iommufd, global_options)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_RLIMIT_MODE,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 1,
	};

	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* This requires root */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.op = IOMMU_OPTION_OP_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
}

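/*
 * Clear CAP_IPC_LOCK from the effective capability set so that the pinned and
 * locked memory accounting exercised by the change_process tests applies to
 * this process.
 */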
static void drop_cap_ipc_lock(struct __test_metadata *_metadata)
{
	cap_t caps;
	cap_value_t cap_list[1] = { CAP_IPC_LOCK };

	caps = cap_get_proc();
	ASSERT_NE(caps, NULL);
	ASSERT_NE(-1,
		  cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
	ASSERT_NE(-1, cap_set_proc(caps));
	cap_free(caps);
}

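/*
 * Scan /proc/<pid>/status for a field such as "VmPin:" or "VmLck:" and return
 * its value (in kB), or -1 if it cannot be read.
 */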
static long get_proc_status_value(pid_t pid, const char *var)
{
	FILE *fp;
	char buf[80], tag[80];
	long val = -1;

	snprintf(buf, sizeof(buf), "/proc/%d/status", pid);
	fp = fopen(buf, "r");
	if (!fp)
		return val;

	while (fgets(buf, sizeof(buf), fp))
		if (fscanf(fp, "%s %ld\n", tag, &val) == 2 && !strcmp(tag, var))
			break;

	fclose(fp);
	return val;
}

static long get_vm_pinned(pid_t pid)
{
	return get_proc_status_value(pid, "VmPin:");
}

static long get_vm_locked(pid_t pid)
{
	return get_proc_status_value(pid, "VmLck:");
}

FIXTURE(change_process)
{
	int fd;
	uint32_t ioas_id;
};

FIXTURE_VARIANT(change_process)
{
	int accounting;
};

FIXTURE_SETUP(change_process)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	drop_cap_ipc_lock(_metadata);
	if (variant->accounting != IOPT_PAGES_ACCOUNT_NONE) {
		struct iommu_option set_limit_cmd = {
			.size = sizeof(set_limit_cmd),
			.option_id = IOMMU_OPTION_RLIMIT_MODE,
			.op = IOMMU_OPTION_OP_SET,
			.val64 = (variant->accounting == IOPT_PAGES_ACCOUNT_MM),
		};
		ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &set_limit_cmd));
	}

	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
}

FIXTURE_TEARDOWN(change_process)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(change_process, account_none)
{
	.accounting = IOPT_PAGES_ACCOUNT_NONE,
};

FIXTURE_VARIANT_ADD(change_process, account_user)
{
	.accounting = IOPT_PAGES_ACCOUNT_USER,
};

FIXTURE_VARIANT_ADD(change_process, account_mm)
{
	.accounting = IOPT_PAGES_ACCOUNT_MM,
};

TEST_F(change_process, basic)
{
	pid_t parent = getpid();
	pid_t child;
	__u64 iova;
	struct iommu_ioas_change_process cmd = {
		.size = sizeof(cmd),
	};

	/* Expect failure if non-file maps exist */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));
	test_ioctl_ioas_unmap(iova, PAGE_SIZE);

	/* Change process works in current process. */
	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));

	/* Change process works in another process */
	child = fork();
	if (!child) {
		int nlock = PAGE_SIZE / 1024;

		/* Parent accounts for locked memory before */
		ASSERT_EQ(nlock, get_vm_pinned(parent));
		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
			ASSERT_EQ(nlock, get_vm_locked(parent));
		ASSERT_EQ(0, get_vm_pinned(getpid()));
		ASSERT_EQ(0, get_vm_locked(getpid()));

		ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_CHANGE_PROCESS, &cmd));

		/* Child accounts for locked memory after */
		ASSERT_EQ(0, get_vm_pinned(parent));
		ASSERT_EQ(0, get_vm_locked(parent));
		ASSERT_EQ(nlock, get_vm_pinned(getpid()));
		if (variant->accounting == IOPT_PAGES_ACCOUNT_MM)
			ASSERT_EQ(nlock, get_vm_locked(getpid()));

		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));
}

FIXTURE(iommufd_ioas)
{
	int fd;
	uint32_t ioas_id;
	uint32_t stdev_id;
	uint32_t hwpt_id;
	uint32_t device_id;
	uint64_t base_iova;
};

FIXTURE_VARIANT(iommufd_ioas)
{
	unsigned int mock_domains;
	unsigned int memory_limit;
};

FIXTURE_SETUP(iommufd_ioas)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	if (!variant->memory_limit) {
		test_ioctl_set_default_memory_limit();
	} else {
		test_ioctl_set_temp_memory_limit(variant->memory_limit);
	}

	for (i = 0; i != variant->mock_domains; i++) {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
				     &self->hwpt_id, &self->device_id);
		test_cmd_dev_check_cache_all(self->device_id,
					     IOMMU_TEST_DEV_CACHE_DEFAULT);
		self->base_iova = MOCK_APERTURE_START;
	}
}

FIXTURE_TEARDOWN(iommufd_ioas)
{
	test_ioctl_set_default_memory_limit();
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
{
};

FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
	.mock_domains = 1,
};

FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
{
	.mock_domains = 2,
};

FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
{
	.mock_domains = 1,
	.memory_limit = 16,
};

TEST_F(iommufd_ioas, ioas_auto_destroy)
{
}

TEST_F(iommufd_ioas, ioas_destroy)
{
	if (self->stdev_id) {
		/* IOAS cannot be freed while a device has a HWPT using it */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	} else {
		/* Can allocate and manually free an IOAS table */
		test_ioctl_destroy(self->ioas_id);
	}
}

TEST_F(iommufd_ioas, alloc_hwpt_nested)
{
	const uint32_t min_data_len =
		offsetofend(struct iommu_hwpt_selftest, iotlb);
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
	uint32_t nested_hwpt_id[2] = {};
	uint32_t num_inv;
	uint32_t parent_hwpt_id = 0;
	uint32_t parent_hwpt_id_not_work = 0;
	uint32_t test_hwpt_id = 0;
	uint32_t iopf_hwpt_id;
	uint32_t fault_id;
	uint32_t fault_fd;

	if (self->device_id) {
		/* Negative tests */
		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
				    &test_hwpt_id);
		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
				    &test_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &parent_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id_not_work);

		/* Negative nested tests */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_NONE, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   min_data_len - 1);
		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, NULL,
					   sizeof(data));
		test_err_hwpt_alloc_nested(
			EOPNOTSUPP, self->device_id, parent_hwpt_id,
			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id_not_work, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));

		/* Allocate two nested hwpts sharing one common parent hwpt */
		test_ioctl_fault_alloc(&fault_id, &fault_fd);
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_hwpt_alloc_iopf(ENOENT, self->device_id, parent_hwpt_id,
					 UINT32_MAX, IOMMU_HWPT_FAULT_ID_VALID,
					 &iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST,
					 &data, sizeof(data));
		test_cmd_hwpt_alloc_iopf(self->device_id, parent_hwpt_id, fault_id,
					 IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
					 IOMMU_HWPT_DATA_SELFTEST, &data,
					 sizeof(data));
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
					      IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
					      IOMMU_TEST_IOTLB_DEFAULT);

		/* Negative test: a nested hwpt on top of a nested hwpt */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   nested_hwpt_id[0], 0, &test_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		/* Negative test: parent hwpt now cannot be freed */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, parent_hwpt_id));

		/* hwpt_invalidate does not support a parent hwpt */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, parent_hwpt_id, inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Check data_type by passing zero-length array */
		num_inv = 0;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: Invalid data_type */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: structure size sanity */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs) + 1, &num_inv);
		assert(!num_inv);

		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 1, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid flag is passed */
		num_inv = 1;
		inv_reqs[0].flags = 0xffffffff;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_uptr when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid entry_len when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 0, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid iotlb_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to invalid flags configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0xffffffff;
		inv_reqs[1].iotlb_id = 1;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to invalid iotlb_id configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 2nd iotlb entry and verify */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 1;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 3rd and 4th iotlb entries and verify */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 2;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = 3;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 2);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);

		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
		num_inv = 1;
		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);

		/* Attach device to nested_hwpt_id[0], which will then be busy */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));

		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
		test_ioctl_destroy(nested_hwpt_id[0]);

		/* Switch from nested_hwpt_id[1] to iopf_hwpt_id */
		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
		/* Trigger an IOPF on the device */
		test_cmd_trigger_iopf(self->device_id, fault_fd);

		/* Detach from nested_hwpt_id[1] and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
		test_ioctl_destroy(nested_hwpt_id[1]);
		test_ioctl_destroy(iopf_hwpt_id);

		/* Detach from the parent hw_pagetable and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(parent_hwpt_id);
		test_ioctl_destroy(parent_hwpt_id_not_work);
		close(fault_fd);
		test_ioctl_destroy(fault_id);
	} else {
		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id);
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[0]);
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[1]);
	}
}

TEST_F(iommufd_ioas, hwpt_attach)
{
	/* Create a device attached directly to a hwpt */
	if (self->stdev_id) {
		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
	} else {
		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
	}
}

TEST_F(iommufd_ioas, ioas_area_destroy)
{
	/* Adding an area does not change ability to destroy */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
	if (self->stdev_id)
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	else
		test_ioctl_destroy(self->ioas_id);
}

TEST_F(iommufd_ioas, ioas_area_auto_destroy)
{
	int i;

	/* Can allocate and automatically free an IOAS table with many areas */
	for (i = 0; i != 10; i++) {
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	}
}

TEST_F(iommufd_ioas, get_hw_info)
{
	struct iommu_test_hw_info buffer_exact;
	struct iommu_test_hw_info_buffer_larger {
		struct iommu_test_hw_info info;
		uint64_t trailing_bytes;
	} buffer_larger;
	struct iommu_test_hw_info_buffer_smaller {
		__u32 flags;
	} buffer_smaller;

	if (self->device_id) {
		/* Provide a zero-size user_buffer */
		test_cmd_get_hw_info(self->device_id, NULL, 0);
		/* Provide a user_buffer with exact size */
		test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
		/*
		 * Provide a user_buffer with size larger than the exact size to
		 * check that the kernel zeroes the trailing bytes.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
		/*
		 * Provide a user_buffer with size smaller than the exact size to
		 * check that the fields within the size range still get updated.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
	} else {
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_exact, sizeof(buffer_exact));
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_larger, sizeof(buffer_larger));
	}
}

TEST_F(iommufd_ioas, area)
{
	int i;

	/* Unmap fails if nothing is mapped */
	for (i = 0; i != 10; i++)
		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);

	/* Unmap works */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
				      PAGE_SIZE);

	/* Split fails */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
				  self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
				  PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
				  PAGE_SIZE);

	/* Over map fails */
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 17 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 15 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
				      self->base_iova + 15 * PAGE_SIZE);

	/* unmap all works */
	test_ioctl_ioas_unmap(0, UINT64_MAX);

	/* Unmap all succeeds on an empty IOAS */
	test_ioctl_ioas_unmap(0, UINT64_MAX);
}

TEST_F(iommufd_ioas, unmap_fully_contained_areas)
{
	uint64_t unmap_len;
	int i;

	/* Give no_domain some space to rewind base_iova */
	self->base_iova += 4 * PAGE_SIZE;

	for (i = 0; i != 4; i++)
		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
					  self->base_iova + i * 16 * PAGE_SIZE);

	/* Unmapping an area that is not fully contained doesn't work */
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT,
				  self->base_iova + 3 * 16 * PAGE_SIZE +
					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);

	/* Unmap fully contained areas works */
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
					    self->base_iova - 4 * PAGE_SIZE,
					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
						    4 * PAGE_SIZE,
					    &unmap_len));
	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
}

TEST_F(iommufd_ioas, area_auto_iova)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};
	__u64 iovas[10];
	int i;

	/* Simple 4k pages */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);

	/* Kernel automatically aligns IOVAs properly */
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		if (self->stdev_id) {
			test_ioctl_ioas_map(buffer, length, &iovas[i]);
		} else {
			test_ioctl_ioas_map((void *)(1UL << 31), length,
					    &iovas[i]);
		}
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Avoids a reserved region */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(false,
			  iovas[i] > test_cmd.add_reserved.start &&
				  iovas[i] <
					  test_cmd.add_reserved.start +
						  test_cmd.add_reserved.length);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Allowed region intersects with a reserved region */
	ranges[0].start = PAGE_SIZE;
	ranges[0].last = PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allocate from an allowed region */
	if (self->stdev_id) {
		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
	} else {
		ranges[0].start = PAGE_SIZE * 200;
		ranges[0].last = PAGE_SIZE * 600 - 1;
	}
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}

TEST_F(iommufd_ioas, area_allowed)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Reserved intersects an allowed */
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
	test_cmd.add_reserved.length = PAGE_SIZE;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd,
			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			   &test_cmd));
	allow_cmd.num_iovas = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allowed intersects a reserved */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
}

TEST_F(iommufd_ioas, copy_area)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
		.dst_ioas_id = self->ioas_id,
		.src_ioas_id = self->ioas_id,
		.length = PAGE_SIZE,
	};

	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);

	/* Copy inside a single IOAS */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));

	/* Copy between IOAS's */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = 0;
	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
}

TEST_F(iommufd_ioas, iova_ranges)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
	};
	struct iommu_iova_range *ranges = buffer;
	struct iommu_ioas_iova_ranges ranges_cmd = {
		.size = sizeof(ranges_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Range can be read */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	if (!self->stdev_id) {
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(SIZE_MAX, ranges[0].last);
		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
	} else {
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 0;
	EXPECT_ERRNO(EMSGSIZE,
		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	EXPECT_EQ(0, ranges[0].start);
	EXPECT_EQ(0, ranges[0].last);

	/* 2 ranges */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	if (!self->stdev_id) {
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
		EXPECT_EQ(SIZE_MAX, ranges[1].last);
	} else {
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 1;
	if (!self->stdev_id) {
		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
					     &ranges_cmd));
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
	} else {
		ASSERT_EQ(0,
			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}
	EXPECT_EQ(0, ranges[1].start);
	EXPECT_EQ(0, ranges[1].last);
}

TEST_F(iommufd_ioas, access_domain_destory)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
				  .length = PAGE_SIZE},
	};
	size_t buf_size = 2 * HUGEPAGE_SIZE;
	uint8_t *buf;

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));

	/* Causes a complicated unpin across a huge page boundary */
	if (self->stdev_id)
		test_ioctl_destroy(self->stdev_id);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);
	ASSERT_EQ(0, munmap(buf, buf_size));
}

TEST_F(iommufd_ioas, access_pin)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_test_cmd check_map_cmd = {
		.size = sizeof(check_map_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
		.check_map = { .iova = MOCK_APERTURE_START,
			       .length = BUFFER_SIZE,
			       .uptr = (uintptr_t)buffer },
	};
	uint32_t access_pages_id;
	unsigned int npages;

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
		uint32_t mock_stdev_id;
		uint32_t mock_hwpt_id;

		access_cmd.access_pages.length = npages * PAGE_SIZE;

		/* Single map/unmap */
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		/* Double user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);
		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);

		/* Add/remove a domain with a user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
				     &mock_hwpt_id, NULL);
		check_map_cmd.id = mock_hwpt_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
				   &check_map_cmd));

		test_ioctl_destroy(mock_stdev_id);
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
	}
	test_cmd_destroy_access(access_cmd.id);
}

TEST_F(iommufd_ioas, access_pin_unmap)
{
	struct iommu_test_cmd access_pages_cmd = {
		.size = sizeof(access_pages_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};

	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_pages_cmd));

	/* Trigger the unmap op */
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);

	/* kernel removed the item for us */
	test_err_destroy_access_pages(
		ENOENT, access_pages_cmd.id,
		access_pages_cmd.access_pages.out_access_pages_id);
}

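/*
 * Exercise the access read/write path: sweep a window of IOVAs straddling a
 * page boundary with every length below sizeof(tmp), reading and then writing
 * through the access and comparing against the backing buffer, then finish
 * with a single multi-page transfer.
 */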
static void check_access_rw(struct __test_metadata *_metadata, int fd,
			    unsigned int access_id, uint64_t iova,
			    unsigned int def_flags)
{
	uint16_t tmp[32];
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_RW,
		.id = access_id,
		.access_rw = { .uptr = (uintptr_t)tmp },
	};
	uint16_t *buffer16 = buffer;
	unsigned int i;
	void *tmp2;

	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
		buffer16[i] = rand();

	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
	     access_cmd.access_rw.iova++) {
		for (access_cmd.access_rw.length = 1;
		     access_cmd.access_rw.length < sizeof(tmp);
		     access_cmd.access_rw.length++) {
			access_cmd.access_rw.flags = def_flags;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));

			for (i = 0; i != ARRAY_SIZE(tmp); i++)
				tmp[i] = rand();
			access_cmd.access_rw.flags = def_flags |
						     MOCK_ACCESS_RW_WRITE;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));
		}
	}

	/* Multi-page test */
	tmp2 = malloc(BUFFER_SIZE);
	ASSERT_NE(NULL, tmp2);
	access_cmd.access_rw.iova = iova;
	access_cmd.access_rw.length = BUFFER_SIZE;
	access_cmd.access_rw.flags = def_flags;
	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			   &access_cmd));
	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
	free(tmp2);
}

TEST_F(iommufd_ioas, access_rw)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);
	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	check_access_rw(_metadata, self->fd, access_id, iova,
			MOCK_ACCESS_RW_SLOW_PATH);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, access_rw_unaligned)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Unaligned pages */
	iova = self->base_iova + MOCK_PAGE_SIZE;
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, fork_gone)
{
	__u32 access_id;
	pid_t child;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	if (self->stdev_id) {
		/*
		 * If a domain already existed then everything was pinned within
		 * the fork, so this copies from one domain to another.
		 */
		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
		check_access_rw(_metadata, self->fd, access_id,
				MOCK_APERTURE_START, 0);

	} else {
		/*
		 * Otherwise we need to actually pin pages which can't happen
		 * since the fork is gone.
		 */
		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
	}

	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, fork_present)
{
	__u32 access_id;
	int pipefds[2];
	uint64_t tmp;
	pid_t child;
	int efd;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
	efd = eventfd(0, EFD_CLOEXEC);
	ASSERT_NE(-1, efd);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		__u64 iova;
		uint64_t one = 1;

		close(pipefds[1]);
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		if (write(efd, &one, sizeof(one)) != sizeof(one))
			exit(100);
		if (read(pipefds[0], &iova, 1) != 1)
			exit(100);
		exit(0);
	}
	close(pipefds[0]);
	ASSERT_NE(-1, child);
	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));

	/* Read pages from the remote process */
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);

	ASSERT_EQ(0, close(pipefds[1]));
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, ioas_option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.val64 = 3;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
}

TEST_F(iommufd_ioas, ioas_iova_alloc)
{
	unsigned int length;
	__u64 iova;

	for (length = 1; length != PAGE_SIZE * 2; length++) {
		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
		} else {
			test_ioctl_ioas_map(buffer, length, &iova);
			test_ioctl_ioas_unmap(iova, length);
		}
	}
}

TEST_F(iommufd_ioas, ioas_align_change)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_SET,
		.object_id = self->ioas_id,
		/* 0 means everything must be aligned to PAGE_SIZE */
		.val64 = 0,
	};

	/*
	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
	 * and map are present.
	 */
	if (variant->mock_domains)
		return;

	/*
	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
	 */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Misalignment is rejected at map time */
	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
				      PAGE_SIZE,
				      MOCK_APERTURE_START + PAGE_SIZE);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Reduce alignment */
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Confirm misalignment is rejected during alignment upgrade */
	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
				  MOCK_APERTURE_START + PAGE_SIZE);
	cmd.val64 = 0;
	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));

	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
}

TEST_F(iommufd_ioas, copy_sweep)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
		.src_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = MOCK_PAGE_SIZE,
	};
	unsigned int dst_ioas_id;
	uint64_t last_iova;
	uint64_t iova;

	test_ioctl_ioas_alloc(&dst_ioas_id);
	copy_cmd.dst_ioas_id = dst_ioas_id;

	if (variant->mock_domains)
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
	else
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;

	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
				  MOCK_APERTURE_START);

	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
	     iova += 511) {
		copy_cmd.src_iova = iova;
		if (iova < MOCK_APERTURE_START ||
		    iova + copy_cmd.length - 1 > last_iova) {
			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
						   &copy_cmd));
		} else {
			ASSERT_EQ(0,
				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
						 copy_cmd.length);
		}
	}

	test_ioctl_destroy(dst_ioas_id);
}

FIXTURE(iommufd_mock_domain)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t hwpt_ids[2];
	uint32_t stdev_ids[2];
	uint32_t idev_ids[2];
	int mmap_flags;
	size_t mmap_buf_size;
};

FIXTURE_VARIANT(iommufd_mock_domain)
{
	unsigned int mock_domains;
	bool hugepages;
	bool file;
};

FIXTURE_SETUP(iommufd_mock_domain)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);

	for (i = 0; i != variant->mock_domains; i++) {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
				     &self->hwpt_ids[i], &self->idev_ids[i]);
		test_cmd_dev_check_cache_all(self->idev_ids[0],
					     IOMMU_TEST_DEV_CACHE_DEFAULT);
	}
	self->hwpt_id = self->hwpt_ids[0];

	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
	self->mmap_buf_size = PAGE_SIZE * 8;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
	}
}

FIXTURE_TEARDOWN(iommufd_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
{
	.mock_domains = 1,
	.hugepages = false,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
{
	.mock_domains = 2,
	.hugepages = false,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
{
	.mock_domains = 2,
	.hugepages = true,
	.file = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file)
{
	.mock_domains = 1,
	.hugepages = false,
	.file = true,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_file_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
	.file = true,
};

/* Have the kernel check that the user pages made it to the iommu_domain */
#define check_mock_iova(_ptr, _iova, _length)                                \
	({                                                                   \
		struct iommu_test_cmd check_map_cmd = {                      \
			.size = sizeof(check_map_cmd),                       \
			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
			.id = self->hwpt_id,                                 \
			.check_map = { .iova = _iova,                        \
				       .length = _length,                    \
				       .uptr = (uintptr_t)(_ptr) },          \
		};                                                           \
		ASSERT_EQ(0,                                                 \
			  ioctl(self->fd,                                    \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
				&check_map_cmd));                            \
		if (self->hwpt_ids[1]) {                                     \
			check_map_cmd.id = self->hwpt_ids[1];                \
			ASSERT_EQ(0,                                         \
				  ioctl(self->fd,                            \
					_IOMMU_TEST_CMD(                     \
						IOMMU_TEST_OP_MD_CHECK_MAP), \
					&check_map_cmd));                    \
		}                                                            \
	})

static void
test_basic_mmap(struct __test_metadata *_metadata,
		struct _test_data_iommufd_mock_domain *self,
		const struct _fixture_variant_iommufd_mock_domain *variant)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;

	/* Simple one page map */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	check_mock_iova(buffer, iova, PAGE_SIZE);

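	/* Create a scratch mapping we can punch holes in to provoke EFAULT */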
	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);

	/* EFAULT half way through mapping */
	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);

	/* EFAULT on first page */
	ASSERT_EQ(0, munmap(buf, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
}

static void
test_basic_file(struct __test_metadata *_metadata,
		struct _test_data_iommufd_mock_domain *self,
		const struct _fixture_variant_iommufd_mock_domain *variant)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;
	int mfd_tmp;
	int prot = PROT_READ | PROT_WRITE;

	/* Simple one page map */
	test_ioctl_ioas_map_file(mfd, 0, PAGE_SIZE, &iova);
	check_mock_iova(mfd_buffer, iova, PAGE_SIZE);

	buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd_tmp);
	ASSERT_NE(MAP_FAILED, buf);

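	/* A length running past the end of the memfd is rejected */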
	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size + 1, &iova);

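	/* After truncating the memfd to zero, the original length is rejected too */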
	ASSERT_EQ(0, ftruncate(mfd_tmp, 0));
	test_err_ioctl_ioas_map_file(EINVAL, mfd_tmp, 0, buf_size, &iova);

	close(mfd_tmp);
}

TEST_F(iommufd_mock_domain, basic)
{
	if (variant->file)
		test_basic_file(_metadata, self, variant);
	else
		test_basic_mmap(_metadata, self, variant);
}

TEST_F(iommufd_mock_domain, ro_unshare)
{
	uint8_t *buf;
	__u64 iova;
	int fd;

	fd = open("/proc/self/exe", O_RDONLY);
	ASSERT_NE(-1, fd);

	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	ASSERT_NE(MAP_FAILED, buf);
	close(fd);

	/*
	 * There have been lots of changes to the "unshare" mechanism in
	 * get_user_pages(), make sure it works right. The write to the page
	 * after we map it for reading should not change the assigned PFN.
	 */
	ASSERT_EQ(0,
		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
				       &iova, IOMMU_IOAS_MAP_READABLE));
	check_mock_iova(buf, iova, PAGE_SIZE);
	memset(buf, 1, PAGE_SIZE);
	check_mock_iova(buf, iova, PAGE_SIZE);
	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
}

TEST_F(iommufd_mock_domain, all_aligns)
{
	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;
	int prot = PROT_READ | PROT_WRITE;
	int mfd;

	if (variant->file)
		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
	else
		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
	ASSERT_NE(MAP_FAILED, buf);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big region,
	 * fewer in the hugepage case since it takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end <= buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			__u64 iova;

			if (variant->file) {
				test_ioctl_ioas_map_file(mfd, start, length,
							 &iova);
			} else {
				test_ioctl_ioas_map(buf + start, length, &iova);
			}
			check_mock_iova(buf + start, iova, length);
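			/* Pages backing the mapped range should now report one reference */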
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
	if (variant->file)
		close(mfd);
}

TEST_F(iommufd_mock_domain, all_aligns_copy)
{
	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;
	int prot = PROT_READ | PROT_WRITE;
	int mfd;

	if (variant->file)
		buf = memfd_mmap(buf_size, prot, MAP_SHARED, &mfd);
	else
		buf = mmap(0, buf_size, prot, self->mmap_flags, -1, 0);
	ASSERT_NE(MAP_FAILED, buf);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big region,
	 * fewer in the hugepage case since it takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end <= buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			unsigned int old_id;
			uint32_t mock_stdev_id;
			__u64 iova;

			if (variant->file) {
				test_ioctl_ioas_map_file(mfd, start, length,
							 &iova);
			} else {
				test_ioctl_ioas_map(buf + start, length, &iova);
			}

			/* Add and destroy a domain while the area exists */
			old_id = self->hwpt_ids[1];
			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
					     &self->hwpt_ids[1], NULL);

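			/* The just-added domain must also observe the existing mapping */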
			check_mock_iova(buf + start, iova, length);
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_destroy(mock_stdev_id);
			self->hwpt_ids[1] = old_id;

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
	if (variant->file)
		close(mfd);
}

TEST_F(iommufd_mock_domain, user_copy)
{
	void *buf = variant->file ? mfd_buffer : buffer;
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buf },
	};
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE,
		.dst_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	struct iommu_ioas_unmap unmap_cmd = {
		.size = sizeof(unmap_cmd),
		.ioas_id = self->ioas_id,
		.iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	unsigned int new_ioas_id, ioas_id;

	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
	test_ioctl_ioas_alloc(&ioas_id);
	if (variant->file) {
		test_ioctl_ioas_map_id_file(ioas_id, mfd, 0, BUFFER_SIZE,
					    &copy_cmd.src_iova);
	} else {
		test_ioctl_ioas_map_id(ioas_id, buf, BUFFER_SIZE,
				       &copy_cmd.src_iova);
	}
	test_cmd_create_access(ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

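	/* Pin the source range via the access, then copy it to the domain-backed IOAS */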
	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);

	/* Now replace the ioas with a new one */
	test_ioctl_ioas_alloc(&new_ioas_id);
	if (variant->file) {
		test_ioctl_ioas_map_id_file(new_ioas_id, mfd, 0, BUFFER_SIZE,
					    &copy_cmd.src_iova);
	} else {
		test_ioctl_ioas_map_id(new_ioas_id, buf, BUFFER_SIZE,
				       &copy_cmd.src_iova);
	}
	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);

	/* Destroy the old ioas and cleanup copied mapping */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
	test_ioctl_destroy(ioas_id);

	/* Then run the same test again with the new ioas */
	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = new_ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buf, MOCK_APERTURE_START, BUFFER_SIZE);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);

	test_ioctl_destroy(new_ioas_id);
}

TEST_F(iommufd_mock_domain, replace)
{
	uint32_t ioas_id;

	test_ioctl_ioas_alloc(&ioas_id);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);

	/*
	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
	 * should get ENOENT when we try to use it.
	 */
	if (variant->mock_domains == 1)
		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
					     self->hwpt_ids[0]);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
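	/* With two domains, rotate the device through both HWPTs and back */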
	if (variant->mock_domains >= 2) {
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[0]);
	}

	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
	test_ioctl_destroy(ioas_id);
}

TEST_F(iommufd_mock_domain, alloc_hwpt)
{
	int i;

	for (i = 0; i != variant->mock_domains; i++) {
		uint32_t hwpt_id[2];
		uint32_t stddev_id;

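		/* Undefined allocation flags must be rejected */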
		test_err_hwpt_alloc(EOPNOTSUPP,
				    self->idev_ids[i], self->ioas_id,
				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    0, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);

		/* Do a hw_pagetable rotation test */
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
		test_ioctl_destroy(hwpt_id[1]);

		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
		test_ioctl_destroy(stddev_id);
		test_ioctl_destroy(hwpt_id[0]);
	}
}

FIXTURE(iommufd_dirty_tracking)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t stdev_id;
	uint32_t idev_id;
	unsigned long page_size;
	unsigned long bitmap_size;
	void *bitmap;
	void *buffer;
};

FIXTURE_VARIANT(iommufd_dirty_tracking)
{
	unsigned long buffer_size;
	bool hugepages;
};

FIXTURE_SETUP(iommufd_dirty_tracking)
{
	unsigned long size;
	int mmap_flags;
	void *vrc;
	int rc;

	if (variant->buffer_size < MOCK_PAGE_SIZE) {
		SKIP(return,
		     "Skipping buffer_size=%lu, less than MOCK_PAGE_SIZE=%lu",
		     variant->buffer_size, MOCK_PAGE_SIZE);
	}

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
	if (rc || !self->buffer) {
		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
			   variant->buffer_size, rc);
	}

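	/*
	 * Reserve a HUGEPAGE_SIZE-aligned range first, then MAP_FIXED the
	 * anonymous (optionally hugetlb) mapping on top of it.
	 */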
	mmap_flags = MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
	}
	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
	vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
		   mmap_flags, -1, 0);
	assert(vrc == self->buffer);

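	/* One bitmap bit tracks each MOCK_PAGE_SIZE page of the buffer */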
	self->page_size = MOCK_PAGE_SIZE;
	self->bitmap_size = variant->buffer_size / self->page_size;

	/* Provision with an extra (PAGE_SIZE) for the unaligned case */
	size = DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE);
	rc = posix_memalign(&self->bitmap, PAGE_SIZE, size + PAGE_SIZE);
	assert(!rc);
	assert(self->bitmap);
	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);

	test_ioctl_ioas_alloc(&self->ioas_id);
	/* Enable 1M mock IOMMU hugepages */
	if (variant->hugepages) {
		test_cmd_mock_domain_flags(self->ioas_id,
					   MOCK_FLAGS_DEVICE_HUGE_IOVA,
					   &self->stdev_id, &self->hwpt_id,
					   &self->idev_id);
	} else {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
				     &self->hwpt_id, &self->idev_id);
	}
}

FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
	munmap(self->buffer, variant->buffer_size);
	munmap(self->bitmap, DIV_ROUND_UP(self->bitmap_size, BITS_PER_BYTE));
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty8k)
{
	/* half of a u8 index bitmap */
	.buffer_size = 8UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty16k)
{
	/* one u8 index bitmap */
	.buffer_size = 16UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64k)
{
	/* one u32 index bitmap */
	.buffer_size = 64UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
{
	/* one u64 index bitmap */
	.buffer_size = 128UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty320k)
{
	/* two u64 index and trailing end bitmap */
	.buffer_size = 320UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M)
{
	/* 4K bitmap (64M IOVA range) */
	.buffer_size = 64UL * 1024UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty64M_huge)
{
	/* 4K bitmap (64M IOVA range) */
	.buffer_size = 64UL * 1024UL * 1024UL,
	.hugepages = true,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
{
	/* 8K bitmap (128M IOVA range) */
	.buffer_size = 128UL * 1024UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M_huge)
{
	/* 8K bitmap (128M IOVA range) */
	.buffer_size = 128UL * 1024UL * 1024UL,
	.hugepages = true,
};

TEST_F(iommufd_dirty_tracking, enforce_dirty)
{
	uint32_t ioas_id, stddev_id, idev_id;
	uint32_t hwpt_id, _hwpt_id;
	uint32_t dev_flags;

	/* Regular case */
	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
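	/* A device without dirty tracking cannot attach to a dirty-tracking HWPT */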
	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
				   NULL);
	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);

	/* IOMMU device does not support dirty tracking */
	test_ioctl_ioas_alloc(&ioas_id);
	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
				   &idev_id);
	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_ioctl_destroy(stddev_id);
}

TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
{
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_set_dirty_tracking(hwpt_id, true);
	test_cmd_set_dirty_tracking(hwpt_id, false);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}

TEST_F(iommufd_dirty_tracking, device_dirty_capability)
{
	uint32_t caps = 0;
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_get_hw_capabilities(self->idev_id, caps,
				     IOMMU_HW_CAP_DIRTY_TRACKING);
	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}

TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
{
	uint32_t page_size = MOCK_PAGE_SIZE;
	uint32_t hwpt_id;
	uint32_t ioas_id;

	if (variant->hugepages)
		page_size = MOCK_HUGE_PAGE_SIZE;

	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);

	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);

	test_cmd_set_dirty_tracking(hwpt_id, true);

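	/* Baseline: the bitmap buffer is both page and u64 aligned */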
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap, self->bitmap_size, 0, _metadata);

	/* PAGE_SIZE unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size, 0, _metadata);

	/* u64 unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + 0xff1, self->bitmap_size, 0,
				_metadata);

	test_ioctl_destroy(hwpt_id);
}

TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
{
	uint32_t page_size = MOCK_PAGE_SIZE;
	uint32_t hwpt_id;
	uint32_t ioas_id;

	if (variant->hugepages)
		page_size = MOCK_HUGE_PAGE_SIZE;

	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);

	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);

	test_cmd_set_dirty_tracking(hwpt_id, true);

	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap, self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	/* Unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	/* u64 unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size, page_size,
				self->bitmap + 0xff1, self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	test_ioctl_destroy(hwpt_id);
}

/* VFIO compatibility IOCTLs */

TEST_F(iommufd, simple_ioctls)
{
	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
}

TEST_F(iommufd, unmap_cmd)
{
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
	};

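	/* Too-small argsz is rejected */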
	unmap_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.argsz = sizeof(unmap_cmd);
	unmap_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.flags = 0;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
}

TEST_F(iommufd, map_cmd)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
		.vaddr = (__u64)buffer,
	};

	map_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	map_cmd.argsz = sizeof(map_cmd);
	map_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Requires a domain to be attached */
	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
}

TEST_F(iommufd, info_cmd)
{
	struct vfio_iommu_type1_info info_cmd = {};

	/* Invalid argsz */
	info_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));

	info_cmd.argsz = sizeof(info_cmd);
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
}

TEST_F(iommufd, set_iommu_cmd)
{
	/* Requires a domain to be attached */
	EXPECT_ERRNO(ENODEV,
		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
}

TEST_F(iommufd, vfio_ioas)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_GET,
	};
	__u32 ioas_id;

	/* ENODEV if there is no compat ioas */
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Invalid id for set */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Valid id for set */
	test_ioctl_ioas_alloc(&ioas_id);
	vfio_ioas_cmd.ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Same id comes back from get */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);

	/* Clear works */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
}

FIXTURE(vfio_compat_mock_domain)
{
	int fd;
	uint32_t ioas_id;
};

FIXTURE_VARIANT(vfio_compat_mock_domain)
{
	unsigned int version;
};

FIXTURE_SETUP(vfio_compat_mock_domain)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_SET,
	};

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Create what VFIO would consider a group */
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);

	/* Attach it to the vfio compat */
	vfio_ioas_cmd.ioas_id = self->ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
}

FIXTURE_TEARDOWN(vfio_compat_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
{
	.version = VFIO_TYPE1v2_IOMMU,
};

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
{
	.version = VFIO_TYPE1_IOMMU,
};

TEST_F(vfio_compat_mock_domain, simple_close)
{
}

TEST_F(vfio_compat_mock_domain, option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

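	/*
	 * The TYPE1 (v1) compat mode is expected to disable the huge pages
	 * option on the compat IOAS, while TYPE1v2 leaves it enabled.
	 */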
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	if (variant->version == VFIO_TYPE1_IOMMU) {
		ASSERT_EQ(0, cmd.val64);
	} else {
		ASSERT_EQ(1, cmd.val64);
	}
}

/*
 * Execute an ioctl command stored in buffer and check that the result does not
 * overflow memory.
 */
static bool is_filled(const void *buf, uint8_t c, size_t len)
{
	const uint8_t *cbuf = buf;

	for (; len; cbuf++, len--)
		if (*cbuf != c)
			return false;
	return true;
}

#define ioctl_check_buf(fd, cmd)                                         \
	({                                                               \
		size_t _cmd_len = *(__u32 *)buffer;                      \
									 \
		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
					  BUFFER_SIZE - _cmd_len));      \
	})

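/* Walk the vfio_info_cap chain returned in 'buffer' and sanity check each entry */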
static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
				      struct vfio_iommu_type1_info *info_cmd)
{
	const struct vfio_info_cap_header *cap;

	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
	cap = buffer + info_cmd->cap_offset;
	while (true) {
		size_t cap_size;

		if (cap->next)
			cap_size = (buffer + cap->next) - (void *)cap;
		else
			cap_size = (buffer + info_cmd->argsz) - (void *)cap;

		switch (cap->id) {
		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
			struct vfio_iommu_type1_info_cap_iova_range *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(1, data->nr_iovas);
			EXPECT_EQ(MOCK_APERTURE_START,
				  data->iova_ranges[0].start);
			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
			break;
		}
		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
			struct vfio_iommu_type1_info_dma_avail *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(sizeof(*data), cap_size);
			break;
		}
		default:
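			/* Any capability ID we do not recognize fails the test */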
			ASSERT_EQ(false, true);
			break;
		}
		if (!cap->next)
			break;

		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
		ASSERT_GE(buffer + cap->next, (void *)cap);
		cap = buffer + cap->next;
	}
}

TEST_F(vfio_compat_mock_domain, get_info)
{
	struct vfio_iommu_type1_info *info_cmd = buffer;
	unsigned int i;
	size_t caplen;

	/* Pre-cap ABI */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);

	/* Read the cap chain size */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = sizeof(*info_cmd),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);
	ASSERT_EQ(0, info_cmd->cap_offset);
	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);

	/* Read the caps, kernel should never create a corrupted caps */
	caplen = info_cmd->argsz;
	for (i = sizeof(*info_cmd); i < caplen; i++) {
		*info_cmd = (struct vfio_iommu_type1_info){
			.argsz = i,
		};
		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
			  info_cmd->flags);
		if (!info_cmd->cap_offset)
			continue;
		check_vfio_info_cap_chain(_metadata, info_cmd);
	}
}

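/* Randomly permute the IOVA list in place so unmaps happen in a random order */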
static void shuffle_array(unsigned long *array, size_t nelms)
{
	unsigned int i;

	/* Shuffle */
	for (i = 0; i != nelms; i++) {
		unsigned long tmp = array[i];
		unsigned int other = rand() % (nelms - i);

		array[i] = array[other];
		array[other] = tmp;
	}
}

TEST_F(vfio_compat_mock_domain, map)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buffer,
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
	unsigned int i;

	/* Simple map/unmap */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* UNMAP_FLAG_ALL requires 0 iova/size */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.iova = 0;
	unmap_cmd.size = 0;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* Small pages */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		map_cmd.iova = pages_iova[i] =
			MOCK_APERTURE_START + i * PAGE_SIZE;
		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
		map_cmd.size = PAGE_SIZE;
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	}
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	unmap_cmd.flags = 0;
	unmap_cmd.size = PAGE_SIZE;
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	}
}

TEST_F(vfio_compat_mock_domain, huge_map)
{
	size_t buf_size = HUGEPAGE_SIZE * 2;
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.size = buf_size,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
	};
	unsigned long pages_iova[16];
	unsigned int i;
	void *buf;

	/* Test huge pages and splitting */
	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	map_cmd.vaddr = (uintptr_t)buf;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	/* type1 mode can cut up larger mappings, type1v2 fails with ENOENT */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
		if (variant->version == VFIO_TYPE1_IOMMU) {
			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		} else {
			EXPECT_ERRNO(ENOENT,
				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		}
	}
}

FIXTURE(iommufd_viommu)
{
	int fd;
	uint32_t ioas_id;
	uint32_t stdev_id;
	uint32_t hwpt_id;
	uint32_t nested_hwpt_id;
	uint32_t device_id;
	uint32_t viommu_id;
};

FIXTURE_VARIANT(iommufd_viommu)
{
	unsigned int viommu;
};

FIXTURE_SETUP(iommufd_viommu)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_ioctl_set_default_memory_limit();

	if (variant->viommu) {
		struct iommu_hwpt_selftest data = {
			.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
		};

		test_cmd_mock_domain(self->ioas_id, &self->stdev_id, NULL,
				     &self->device_id);

		/* Allocate a nesting parent hwpt */
		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &self->hwpt_id);

		/* Allocate a vIOMMU taking refcount of the parent hwpt */
		test_cmd_viommu_alloc(self->device_id, self->hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST,
				      &self->viommu_id);

		/* Allocate a regular nested hwpt */
		test_cmd_hwpt_alloc_nested(self->device_id, self->viommu_id, 0,
					   &self->nested_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
	}
}

FIXTURE_TEARDOWN(iommufd_viommu)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_viommu, no_viommu)
{
	.viommu = 0,
};

FIXTURE_VARIANT_ADD(iommufd_viommu, mock_viommu)
{
	.viommu = 1,
};

TEST_F(iommufd_viommu, viommu_auto_destroy)
{
}

TEST_F(iommufd_viommu, viommu_negative_tests)
{
	uint32_t device_id = self->device_id;
	uint32_t ioas_id = self->ioas_id;
	uint32_t hwpt_id;

	if (self->device_id) {
		/* Negative test -- invalid hwpt (hwpt_id=0) */
		test_err_viommu_alloc(ENOENT, device_id, 0,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);

		/* Negative test -- not a nesting parent hwpt */
		test_cmd_hwpt_alloc(device_id, ioas_id, 0, &hwpt_id);
		test_err_viommu_alloc(EINVAL, device_id, hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
		test_ioctl_destroy(hwpt_id);

		/* Negative test -- unsupported viommu type */
		test_err_viommu_alloc(EOPNOTSUPP, device_id, self->hwpt_id,
				      0xdead, NULL);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->hwpt_id));
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->viommu_id));
	} else {
		test_err_viommu_alloc(ENOENT, self->device_id, self->hwpt_id,
				      IOMMU_VIOMMU_TYPE_SELFTEST, NULL);
	}
}

TEST_F(iommufd_viommu, viommu_alloc_nested_iopf)
{
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t iopf_hwpt_id;
	uint32_t fault_id;
	uint32_t fault_fd;

	if (self->device_id) {
		test_ioctl_fault_alloc(&fault_id, &fault_fd);
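		/* A fault object ID that does not exist must fail with ENOENT */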
		test_err_hwpt_alloc_iopf(
			ENOENT, dev_id, viommu_id, UINT32_MAX,
			IOMMU_HWPT_FAULT_ID_VALID, &iopf_hwpt_id,
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_err_hwpt_alloc_iopf(
			EOPNOTSUPP, dev_id, viommu_id, fault_id,
			IOMMU_HWPT_FAULT_ID_VALID | (1 << 31), &iopf_hwpt_id,
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_cmd_hwpt_alloc_iopf(
			dev_id, viommu_id, fault_id, IOMMU_HWPT_FAULT_ID_VALID,
			&iopf_hwpt_id, IOMMU_HWPT_DATA_SELFTEST, &data,
			sizeof(data));

		test_cmd_mock_domain_replace(self->stdev_id, iopf_hwpt_id);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, iopf_hwpt_id));
		test_cmd_trigger_iopf(dev_id, fault_fd);

		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(iopf_hwpt_id);
		close(fault_fd);
		test_ioctl_destroy(fault_id);
	}
}

TEST_F(iommufd_viommu, vdevice_alloc)
{
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t vdev_id = 0;

	if (dev_id) {
		/* Set vdev_id to 0x99, reject a duplicate, unset it, then set it to 0x88 */
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);
		test_err_vdevice_alloc(EEXIST, viommu_id, dev_id, 0x99,
				       &vdev_id);
		test_ioctl_destroy(vdev_id);
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x88, &vdev_id);
		test_ioctl_destroy(vdev_id);
	} else {
		test_err_vdevice_alloc(ENOENT, viommu_id, dev_id, 0x99, NULL);
	}
}

TEST_F(iommufd_viommu, vdevice_cache)
{
	struct iommu_viommu_invalidate_selftest inv_reqs[2] = {};
	uint32_t viommu_id = self->viommu_id;
	uint32_t dev_id = self->device_id;
	uint32_t vdev_id = 0;
	uint32_t num_inv;

	if (dev_id) {
		test_cmd_vdevice_alloc(viommu_id, dev_id, 0x99, &vdev_id);

		test_cmd_dev_check_cache_all(dev_id,
					     IOMMU_TEST_DEV_CACHE_DEFAULT);

		/* Check data_type by passing zero-length array */
		num_inv = 0;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: Invalid data_type */
		num_inv = 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST_INVALID,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: structure size sanity */
		num_inv = 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs) + 1, &num_inv);
		assert(!num_inv);

		num_inv = 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   1, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid flag is passed */
		num_inv = 1;
		inv_reqs[0].flags = 0xffffffff;
		inv_reqs[0].vdev_id = 0x99;
		test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_uptr when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		test_err_viommu_invalidate(EINVAL, viommu_id, NULL,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid entry_len when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   0, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid cache_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid vdev_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x9;
		inv_reqs[0].cache_id = 0;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/*
		 * Invalidate the 1st cache entry but fail the 2nd request
		 * due to invalid flags configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 0;
		inv_reqs[1].flags = 0xffffffff;
		inv_reqs[1].vdev_id = 0x99;
		inv_reqs[1].cache_id = 1;
		test_err_viommu_invalidate(EOPNOTSUPP, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache(dev_id, 0, 0);
		test_cmd_dev_check_cache(dev_id, 1,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 2,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 3,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);

		/*
		 * Invalidate the 1st cache entry but fail the 2nd request
		 * due to invalid cache_id configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 0;
		inv_reqs[1].flags = 0;
		inv_reqs[1].vdev_id = 0x99;
		inv_reqs[1].cache_id = MOCK_DEV_CACHE_ID_MAX + 1;
		test_err_viommu_invalidate(EINVAL, viommu_id, inv_reqs,
					   IOMMU_VIOMMU_INVALIDATE_DATA_SELFTEST,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache(dev_id, 0, 0);
		test_cmd_dev_check_cache(dev_id, 1,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 2,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 3,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);

		/* Invalidate the 2nd cache entry and verify */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 1;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache(dev_id, 0, 0);
		test_cmd_dev_check_cache(dev_id, 1, 0);
		test_cmd_dev_check_cache(dev_id, 2,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);
		test_cmd_dev_check_cache(dev_id, 3,
					 IOMMU_TEST_DEV_CACHE_DEFAULT);

		/* Invalidate the 3rd and 4th cache entries and verify */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].cache_id = 2;
		inv_reqs[1].flags = 0;
		inv_reqs[1].vdev_id = 0x99;
		inv_reqs[1].cache_id = 3;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 2);
		test_cmd_dev_check_cache_all(dev_id, 0);

		/* Invalidate all cache entries for vdev_id 0x99 via FLAG_ALL and verify */
		num_inv = 1;
		inv_reqs[0].vdev_id = 0x99;
		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
		test_cmd_viommu_invalidate(viommu_id, inv_reqs,
					   sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_dev_check_cache_all(dev_id, 0);
		test_ioctl_destroy(vdev_id);
	}
}

TEST_HARNESS_MAIN