whiterose

linux unikernel

test_verifier.c (24812B)


/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <sys/capability.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "bpf_rlimit.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "../../../include/linux/filter.h"

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	14
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all	/* reads "cafe4all": 0xcafe4a with an "ll" suffix */
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;

struct bpf_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t retval, retval_unpriv, insn_processed;
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	__u8 data[TEST_DATA_LEN];
	void (*fill_helper)(struct bpf_test *self);
	uint8_t runs;
	struct {
		uint32_t retval, retval_unpriv;
		union {
			__u8 data[TEST_DATA_LEN];
			__u64 data64[TEST_DATA_LEN / 8];
		};
	} retvals[MAX_TEST_RUNS];
};
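
/* A minimal sketch, not part of the suite, of what one entry in the
 * tests[] array below looks like; the name and values here are made up
 * for illustration only.
 */
static struct bpf_test example_test __attribute__((unused)) = {
	.descr = "example: mov64 imm and exit",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),	/* r0 = 1 */
	BPF_EXIT_INSN(),		/* return r0 */
	},
	.result = ACCEPT,
	.retval = 1,
};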

/* Note we want this to be 64-bit aligned so that the end of our array
 * is actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x PUSH_CNT followed by
	 * {skb->data[0], vlan_pop} x PUSH_CNT, repeated five times
	 */
#define PUSH_CNT 51
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn = self->insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to the exit insn at the end; i is bumped separately
		 * so that the offset is computed with the pre-increment
		 * value
		 */
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 2);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 2);
		i++;
	}
	if (++k < 5)
		goto loop;

	/* pad the rest of the program and terminate it */
	for (; i < len - 1; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 1] = BPF_EXIT_INSN();
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->insns;
	unsigned int len = BPF_MAXINSNS;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
}

static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	/* self->retval initially holds the number of instructions to fill;
	 * it is overwritten below with the expected return value.
	 */
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	/* fold the high half into the low half, as the program does */
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

/* BPF_SK_LOOKUP contains 13 instructions; account for them if you need to
 * fix up map indices.
 */
#define BPF_SK_LOOKUP							\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = sk_lookup_tcp(ctx, &tuple, sizeof tuple, 0, 0) */	\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_sk_lookup_tcp)
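
/* A sketch, not from the suite, of how a test embeds BPF_SK_LOOKUP; the
 * release of the returned socket reference mirrors what the reference
 * tracking tests do. Name and values are made up for illustration.
 */
static struct bpf_test sk_lookup_example __attribute__((unused)) = {
	.descr = "example: sk_lookup_tcp and release",
	.insns = {
	BPF_SK_LOOKUP,
	/* if (sk) bpf_sk_release(sk) */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_sk_release),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
};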

/* BPF_DIRECT_PKT_R2 contains 7 instructions. It initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()
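
/* A sketch, not from the suite, of how BPF_DIRECT_PKT_R2 is used: once the
 * macro has run, up to 8 bytes at r2 may be read directly. Name and values
 * are made up for illustration.
 */
static struct bpf_test direct_pkt_example __attribute__((unused)) = {
	.descr = "example: load first packet byte via r2",
	.insns = {
	BPF_DIRECT_PKT_R2,
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),	/* r0 = data[0] */
	BPF_EXIT_INSN(),
	},
	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	.result = ACCEPT,
};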

/* BPF_RAND_UEXT_R7 contains 4 instructions. It initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions. It initializes R7 to a random
 * negative u32 and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
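
/* Worked example of the two macros above, assuming bpf_get_prandom_u32()
 * returned 0x12345678:
 *
 * BPF_RAND_UEXT_R7: LSH 33 then RSH 33 keep only bits 0-30, so R7 ends up
 * as 0x0000000012345678 with bit 31 clear, i.e. a positive value.
 *
 * BPF_RAND_SEXT_R7: the OR forces bit 31 on, LSH 32 moves the low word to
 * the high word, and ARSH 32 shifts it back while replicating bit 63, so
 * R7 ends up as 0xffffffff92345678, a sign-extended negative value.
 */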

static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!bpf_probe_map_type(map_type, 0)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	int fd;

	fd = bpf_create_map(type, size_key, size_value, max_elem,
			    type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create map '%s'!\n", strerror(errno));
	}

	return fd;
}

static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 42),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, 41),
		BPF_EXIT_INSN(),
	};

	return bpf_load_program(prog_type, prog,
				ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}

static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key)
{
	int p2key = 1;
	int mfd, p1fd, p2fd;

	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			     sizeof(int), max_elem, 0);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy1(prog_type);
	p2fd = create_prog_dummy2(prog_type, mfd, p2key);
	if (p1fd < 0 || p2fd < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto out;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto out;
	close(p2fd);
	close(p1fd);

	return mfd;
out:
	close(p2fd);
	close(p1fd);
	close(mfd);
	return -1;
}
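
/* Sketch of what create_prog_array() builds, worked out here for clarity:
 * slot p1key holds dummy1 (returns 42) and slot p2key = 1 holds dummy2,
 * which tail-calls into slot 1 of the same array, i.e. into itself, and
 * falls through to return 41 only if the tail call fails; runaway chains
 * are bounded by the kernel's tail-call limit.
 */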

static int create_map_in_map(void)
{
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				      sizeof(int), 1, 0);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
					     sizeof(int), inner_map_fd, 1, 0);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}

static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
		BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, 0);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}

#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) \
	(name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
#define BTF_MEMBER_ENC(name, type, bits_offset) \
	(name), (type), (bits_offset)

struct btf_raw_data {
	__u32 raw_types[64];
	const char *str_sec;
	__u32 str_sec_size;
};

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
	/* struct bpf_spin_lock */                      /* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */                                /* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
};
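
/* How the name offsets above index into btf_str_sec, worked out here for
 * clarity: offset 0 is the mandatory leading '\0' (the anonymous name),
 * 1 -> "bpf_spin_lock", 15 -> "val", 19 -> "cnt" and 23 -> "l".
 */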

static int load_btf(void)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	void *ptr, *raw_btf;
	int btf_fd;

	/* The raw BTF blob is the header followed back to back by the type
	 * and string sections; type_off/str_off are relative to the end of
	 * the header, which is why str_off equals sizeof(btf_raw_types).
	 */
	ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
			       sizeof(btf_str_sec));
	if (!raw_btf)
		return -1;

	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, btf_raw_types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, btf_str_sec, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
	free(raw_btf);
	if (btf_fd < 0)
		return -1;
	return btf_fd;
}

static int create_map_spin_lock(void)
{
	struct bpf_create_map_attr attr = {
		.name = "test_map",
		.map_type = BPF_MAP_TYPE_ARRAY,
		.key_size = 4,
		.value_size = 8,
		.max_entries = 1,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	};
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	attr.btf_fd = btf_fd;
	fd = bpf_create_map_xattr(&attr);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}

static char bpf_vlog[UINT_MAX >> 8];

static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;

	if (test->fill_helper)
		test->fill_helper(test);

	/* Allocating hash tables with 1 element is fine here, since we only
	 * exercise the verifier and never do a runtime lookup, so the only
	 * thing that really matters is the value size.
	 *
	 * Each fixup_* array below is a zero-terminated list of instruction
	 * indices; the imm field of the instruction (typically a
	 * BPF_LD_MAP_FD) at each index is patched with the new map's fd.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
}

static int set_admin(bool admin)
{
	cap_t caps;
	const cap_value_t cap_val = CAP_SYS_ADMIN;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
				admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag");
		goto out;
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}

static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	uint32_t retval;
	int err;

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
				tmp, &size_tmp, &retval, NULL);
	if (unpriv)
		set_admin(false);
	if (err && errno != 524 /* ENOTSUPP */ && errno != EPERM) {
		printf("Unexpected bpf_prog_test_run error ");
		return err;
	}
	if (!err && retval != expected_val &&
	    expected_val != POINTER_VALUE) {
		printf("FAIL retval %u != %u ", retval, expected_val);
		return 1;
	}

	return 0;
}

static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;
	prog_len = probe_filter_length(prog);

	pflags = 0;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
	if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			      expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		if (!proc) {
			printf("FAIL\nNo \"processed\" count in verifier log!\n");
			goto fail_log;
		}
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs) {
			expected_val = unpriv && test->retval_unpriv ?
				test->retval_unpriv : test->retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->data, sizeof(test->data));
			if (err)
				run_errs++;
			else
				run_successes++;
		}

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	cap_t caps;
	cap_flag_value_t sysadmin = CAP_CLEAR;
	const cap_value_t cap_val = CAP_SYS_ADMIN;

#ifdef CAP_IS_SUPPORTED
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("CAP_IS_SUPPORTED(CAP_SETFCAP)");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
		perror("cap_get_flag");
	if (cap_free(caps))
		perror("cap_free");
	return (sysadmin == CAP_SET);
}

static void get_unpriv_disabled(void)
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/"UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Tests that would run as unprivileged are skipped right
		 * away when unprivileged bpf is disabled.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

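/* Usage sketch (binary name assumed): with no arguments every test runs,
 * one argument selects a single test, and two arguments select an
 * inclusive range:
 *
 *	./test_verifier		# run all tests
 *	./test_verifier 47	# run only test #47
 *	./test_verifier 13 42	# run tests #13 through #42
 */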
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}