Contributors: 8
Author               Tokens   Token Proportion   Commits   Commit Proportion
Jakub Kiciński         6226             92.40%         2              10.53%
Alexei Starovoitov      287              4.26%         2              10.53%
Daniel Borkmann         126              1.87%        10              52.63%
Vadim Fedorenko          52              0.77%         1               5.26%
Willem de Bruijn         15              0.22%         1               5.26%
Jiong Wang               14              0.21%         1               5.26%
Ilya Leoshkevich         14              0.21%         1               5.26%
Eric Dumazet              4              0.06%         1               5.26%
Total                  6738                           19

{
	"access skb fields ok",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, queue_mapping)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, protocol)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_present)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, vlan_tci)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, napi_id)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"access skb fields bad1",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -4),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
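/*
 * Note on the "bad2".."bad4" cases below: .fixup_map_hash_8b gives the
 * index of the BPF_LD_MAP_FD placeholder instruction; the test harness
 * patches in the fd of a hash map it creates before loading the
 * program, so the map_lookup_elem() calls operate on a real map.
 */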
{
	"access skb fields bad2",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 9),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad3",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, pkt_type)),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -12),
	},
	.fixup_map_hash_8b = { 6 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"access skb fields bad4",
	.insns = {
	BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 3),
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, len)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_JMP_IMM(BPF_JA, 0, 0, -13),
	},
	.fixup_map_hash_8b = { 7 },
	.errstr = "different pointers",
	.errstr_unpriv = "R1 pointer comparison",
	.result = REJECT,
},
{
	"invalid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"invalid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"valid access __sk_buff family",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, family)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip4",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip4)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff local_ip6",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_ip6[3])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, remote_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"valid access __sk_buff remote_port",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, local_port)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"invalid access of tc_classid for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_classid)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"invalid access of skb->mark for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->mark is not writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
	.errstr = "invalid bpf_context access",
},
{
	"check skb->tc_index is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"check skb->priority is writeable by SK_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, priority)),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"direct packet read for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"direct packet write for SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
	BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"overlapping checks for direct packet access SK_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
		    offsetof(struct __sk_buff, data_end)),
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
	BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
	"check skb->mark is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
{
	"check skb->tc_index is not writeable by sockets",
	.insns = {
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
{
	"check cb access: byte",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 3),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"__sk_buff->hash, offset 0, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"__sk_buff->tc_index, offset 3, byte store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
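/*
 * The __BYTE_ORDER__ conditionals in the hash load tests below pick the
 * byte (or half-word) offset that refers to the same part of the 32-bit
 * hash field on little- and big-endian hosts, so each test exercises
 * the same logical access regardless of endianness.
 */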
{
	"check skb->hash byte load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash byte load permitted 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: byte, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check cb access: half",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3]) + 2),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: half, unaligned",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check __sk_buff->hash, offset 0, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hash)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->tc_index, offset 2, half store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check skb->hash half load permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load permitted 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 2),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash)),
#endif
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check skb->hash half load not permitted, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check skb->hash half load not permitted, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 3),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hash) + 1),
#endif
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
	"check cb access: half, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check cb access: word",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: word, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 1),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 2),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: word, unaligned 4",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4]) + 3),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"check cb access: double, unaligned 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[1])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, unaligned 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_EXIT_INSN(),
	},
	.errstr = "misaligned context access",
	.result = REJECT,
	.flags = F_LOAD_WITH_STRICT_ALIGNMENT,
},
{
	"check cb access: double, oob 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, oob 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw store not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check __sk_buff->ifindex dw load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, ifindex)),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check cb access: double, wrong type",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
	"check out of range skb->cb access",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0]) + 256),
	BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "",
	.result = REJECT,
	.prog_type = BPF_PROG_TYPE_SCHED_ACT,
},
{
	"write skb fields from socket prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[4])),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[2])),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.errstr_unpriv = "R1 leaks addr",
	.result_unpriv = REJECT,
},
{
	"write skb fields from tc_cls_act prog",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, mark)),
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tc_index)),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[3])),
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, tstamp)),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "",
	.result_unpriv = REJECT,
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"check skb->data half load not permitted",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data)),
#else
	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, data) + 2),
#endif
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid bpf_context access",
},
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_segs from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=164 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_segs from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_segs)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write gso_size from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=176 size=4",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read gso_size from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"padding after gso_size is not accessible",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
		    offsetofend(struct __sk_buff, gso_size)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=180 size=4",
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"read hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"write hwtstamp from CGROUP_SKB",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.result_unpriv = REJECT,
	.errstr = "invalid bpf_context access off=184 size=8",
	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
	"read hwtstamp from CLS",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, hwtstamp)),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
	"check wire_len is not readable by sockets",
	.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, wire_len)),
		BPF_EXIT_INSN(),
	},
	.errstr = "invalid bpf_context access",
	.result = REJECT,
},
{
	"check wire_len is readable by tc classifier",
	.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, wire_len)),
		BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
},
{
	"check wire_len is not writable by tc classifier",
	.insns = {
		BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
			    offsetof(struct __sk_buff, wire_len)),
		BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.errstr = "invalid bpf_context access",
	.errstr_unpriv = "R1 leaks addr",
	.result = REJECT,
},
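/*
 * The two cases below exercise the verifier's branch analysis for
 * packet pointer vs. pkt_end comparisons: on the path where the first
 * bounds check fails and r1 is still 0, the repeated comparison must be
 * recognized as always taken so the half-word load through r1 is never
 * reached. The second case swaps the operands (pkt_end < pkt) to cover
 * the other comparison direction.
 */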
{
       "pkt > pkt_end taken check",
       .insns = {
       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
                   offsetof(struct __sk_buff, data_end)),
       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
                   offsetof(struct __sk_buff, data)),
       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 1),          //  8. if r3 > r2 goto 10
       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u16 *)(r1 + 9)
       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
       BPF_EXIT_INSN(),                                        // 11. exit
       },
       .result = ACCEPT,
       .prog_type = BPF_PROG_TYPE_SK_SKB,
       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
       "pkt_end < pkt taken check",
       .insns = {
       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,                //  0. r2 = *(u32 *)(r1 + data_end)
                   offsetof(struct __sk_buff, data_end)),
       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,                //  1. r4 = *(u32 *)(r1 + data)
                   offsetof(struct __sk_buff, data)),
       BPF_MOV64_REG(BPF_REG_3, BPF_REG_4),                    //  2. r3 = r4
       BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 42),                  //  3. r3 += 42
       BPF_MOV64_IMM(BPF_REG_1, 0),                            //  4. r1 = 0
       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_2, 2),          //  5. if r3 > r2 goto 8
       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 14),                  //  6. r4 += 14
       BPF_MOV64_REG(BPF_REG_1, BPF_REG_4),                    //  7. r1 = r4
       BPF_JMP_REG(BPF_JLT, BPF_REG_2, BPF_REG_3, 1),          //  8. if r2 < r3 goto 10
       BPF_LDX_MEM(BPF_H, BPF_REG_2, BPF_REG_1, 9),            //  9. r2 = *(u16 *)(r1 + 9)
       BPF_MOV64_IMM(BPF_REG_0, 0),                            // 10. r0 = 0
       BPF_EXIT_INSN(),                                        // 11. exit
       },
       .result = ACCEPT,
       .prog_type = BPF_PROG_TYPE_SK_SKB,
       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},