whiterose

linux unikernel
git clone https://git.ne02ptzero.me/git/whiterose

core.c (55462B)


      1 /*
      2  * Linux Socket Filter - Kernel level socket filtering
      3  *
      4  * Based on the design of the Berkeley Packet Filter. The new
      5  * internal format has been designed by PLUMgrid:
      6  *
      7  *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
      8  *
      9  * Authors:
     10  *
     11  *	Jay Schulist <jschlst@samba.org>
     12  *	Alexei Starovoitov <ast@plumgrid.com>
     13  *	Daniel Borkmann <dborkman@redhat.com>
     14  *
     15  * This program is free software; you can redistribute it and/or
     16  * modify it under the terms of the GNU General Public License
     17  * as published by the Free Software Foundation; either version
     18  * 2 of the License, or (at your option) any later version.
     19  *
     20  * Andi Kleen - Fix a few bad bugs and races.
     21  * Kris Katterjohn - Added many additional checks in bpf_check_classic()
     22  */
     23 
     24 #include <uapi/linux/btf.h>
     25 #include <linux/filter.h>
     26 #include <linux/skbuff.h>
     27 #include <linux/vmalloc.h>
     28 #include <linux/random.h>
     29 #include <linux/moduleloader.h>
     30 #include <linux/bpf.h>
     31 #include <linux/btf.h>
     32 #include <linux/frame.h>
     33 #include <linux/rbtree_latch.h>
     34 #include <linux/kallsyms.h>
     35 #include <linux/rcupdate.h>
     36 #include <linux/perf_event.h>
     37 
     38 #include <asm/unaligned.h>
     39 
     40 /* Registers */
     41 #define BPF_R0	regs[BPF_REG_0]
     42 #define BPF_R1	regs[BPF_REG_1]
     43 #define BPF_R2	regs[BPF_REG_2]
     44 #define BPF_R3	regs[BPF_REG_3]
     45 #define BPF_R4	regs[BPF_REG_4]
     46 #define BPF_R5	regs[BPF_REG_5]
     47 #define BPF_R6	regs[BPF_REG_6]
     48 #define BPF_R7	regs[BPF_REG_7]
     49 #define BPF_R8	regs[BPF_REG_8]
     50 #define BPF_R9	regs[BPF_REG_9]
     51 #define BPF_R10	regs[BPF_REG_10]
     52 
     53 /* Named registers */
     54 #define DST	regs[insn->dst_reg]
     55 #define SRC	regs[insn->src_reg]
     56 #define FP	regs[BPF_REG_FP]
     57 #define AX	regs[BPF_REG_AX]
     58 #define ARG1	regs[BPF_REG_ARG1]
     59 #define CTX	regs[BPF_REG_CTX]
     60 #define IMM	insn->imm
     61 
     62 /* No hurry in this branch
     63  *
     64  * Exported for the bpf jit load helper.
     65  */
     66 void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
     67 {
     68 	u8 *ptr = NULL;
     69 
     70 	if (k >= SKF_NET_OFF)
     71 		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
     72 	else if (k >= SKF_LL_OFF)
     73 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
     74 
     75 	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
     76 		return ptr;
     77 
     78 	return NULL;
     79 }
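
        /* Editor's note, not part of the upstream file: a minimal usage
         * sketch of the helper above, assuming an IPv4 skb. A classic BPF
         * negative offset such as SKF_NET_OFF + 12 resolves to
         * skb_network_header(skb) + 12, i.e. the IPv4 source address:
         *
         *	__be32 saddr;
         *	void *p = bpf_internal_load_pointer_neg_helper(skb,
         *				SKF_NET_OFF + 12, sizeof(saddr));
         *	if (p)
         *		saddr = *(__be32 *)p;
         *
         * NULL is returned whenever the requested range does not fit into
         * the linear skb data between skb->head and skb_tail_pointer(skb).
         */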
     80 
     81 struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags)
     82 {
     83 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
     84 	struct bpf_prog_aux *aux;
     85 	struct bpf_prog *fp;
     86 
     87 	size = round_up(size, PAGE_SIZE);
     88 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
     89 	if (fp == NULL)
     90 		return NULL;
     91 
     92 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
     93 	if (aux == NULL) {
     94 		vfree(fp);
     95 		return NULL;
     96 	}
     97 
     98 	fp->pages = size / PAGE_SIZE;
     99 	fp->aux = aux;
    100 	fp->aux->prog = fp;
    101 	fp->jit_requested = ebpf_jit_enabled();
    102 
    103 	INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
    104 
    105 	return fp;
    106 }
    107 
    108 struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
    109 {
    110 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
    111 	struct bpf_prog *prog;
    112 	int cpu;
    113 
    114 	prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags);
    115 	if (!prog)
    116 		return NULL;
    117 
    118 	prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags);
    119 	if (!prog->aux->stats) {
    120 		kfree(prog->aux);
    121 		vfree(prog);
    122 		return NULL;
    123 	}
    124 
    125 	for_each_possible_cpu(cpu) {
    126 		struct bpf_prog_stats *pstats;
    127 
    128 		pstats = per_cpu_ptr(prog->aux->stats, cpu);
    129 		u64_stats_init(&pstats->syncp);
    130 	}
    131 	return prog;
    132 }
    133 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
    134 
    135 int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
    136 {
    137 	if (!prog->aux->nr_linfo || !prog->jit_requested)
    138 		return 0;
    139 
    140 	prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
    141 					 sizeof(*prog->aux->jited_linfo),
    142 					 GFP_KERNEL | __GFP_NOWARN);
    143 	if (!prog->aux->jited_linfo)
    144 		return -ENOMEM;
    145 
    146 	return 0;
    147 }
    148 
    149 void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
    150 {
    151 	kfree(prog->aux->jited_linfo);
    152 	prog->aux->jited_linfo = NULL;
    153 }
    154 
    155 void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
    156 {
    157 	if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
    158 		bpf_prog_free_jited_linfo(prog);
    159 }
    160 
    161 /* The JIT engine is responsible for providing an array
    162  * for the insn_off to jited_off mapping (insn_to_jit_off).
    163  *
    164  * The idx to this array is the insn_off.  Hence, the insn_off
    165  * here is relative to the prog itself instead of the main prog.
    166  * This array has one entry for each xlated bpf insn.
    167  *
    168  * jited_off is the byte off to the last byte of the jited insn.
    169  *
    170  * Hence, with
    171  * insn_start:
    172  *      The first bpf insn off of the prog.  The insn off
    173  *      here is relative to the main prog.
    174  *      e.g. if prog is a subprog, insn_start > 0
    175  * linfo_idx:
    176  *      The prog's idx to prog->aux->linfo and jited_linfo
    177  *
    178  * jited_linfo[linfo_idx] = prog->bpf_func
    179  *
    180  * For i > linfo_idx,
    181  *
    182  * jited_linfo[i] = prog->bpf_func +
    183  *	insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
    184  */
    185 void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
    186 			       const u32 *insn_to_jit_off)
    187 {
    188 	u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
    189 	const struct bpf_line_info *linfo;
    190 	void **jited_linfo;
    191 
    192 	if (!prog->aux->jited_linfo)
    193 		/* Userspace did not provide linfo */
    194 		return;
    195 
    196 	linfo_idx = prog->aux->linfo_idx;
    197 	linfo = &prog->aux->linfo[linfo_idx];
    198 	insn_start = linfo[0].insn_off;
    199 	insn_end = insn_start + prog->len;
    200 
    201 	jited_linfo = &prog->aux->jited_linfo[linfo_idx];
    202 	jited_linfo[0] = prog->bpf_func;
    203 
    204 	nr_linfo = prog->aux->nr_linfo - linfo_idx;
    205 
    206 	for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
    207 		/* The verifier ensures that linfo[i].insn_off is
    208 		 * strictly increasing
    209 		 */
    210 		jited_linfo[i] = prog->bpf_func +
    211 			insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
    212 }
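
        /* Editor's note, not part of the upstream file: a small worked
         * example of the mapping above, with made-up numbers. Take a prog
         * with linfo_idx == 0, insn_start == 0, nr_linfo == 3 and line
         * info entries at insn_off {0, 2, 5}. The loop above then fills:
         *
         *	jited_linfo[0] = prog->bpf_func;
         *	jited_linfo[1] = prog->bpf_func + insn_to_jit_off[1];
         *	jited_linfo[2] = prog->bpf_func + insn_to_jit_off[4];
         *
         * i.e. each entry is bpf_func plus the jited offset recorded for
         * the xlated insn right before linfo[i].insn_off.
         */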
    213 
    214 void bpf_prog_free_linfo(struct bpf_prog *prog)
    215 {
    216 	bpf_prog_free_jited_linfo(prog);
    217 	kvfree(prog->aux->linfo);
    218 }
    219 
    220 struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
    221 				  gfp_t gfp_extra_flags)
    222 {
    223 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
    224 	struct bpf_prog *fp;
    225 	u32 pages, delta;
    226 	int ret;
    227 
    228 	BUG_ON(fp_old == NULL);
    229 
    230 	size = round_up(size, PAGE_SIZE);
    231 	pages = size / PAGE_SIZE;
    232 	if (pages <= fp_old->pages)
    233 		return fp_old;
    234 
    235 	delta = pages - fp_old->pages;
    236 	ret = __bpf_prog_charge(fp_old->aux->user, delta);
    237 	if (ret)
    238 		return NULL;
    239 
    240 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
    241 	if (fp == NULL) {
    242 		__bpf_prog_uncharge(fp_old->aux->user, delta);
    243 	} else {
    244 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
    245 		fp->pages = pages;
    246 		fp->aux->prog = fp;
    247 
    248 		/* We keep fp->aux from fp_old around in the new
    249 		 * reallocated structure.
    250 		 */
    251 		fp_old->aux = NULL;
    252 		__bpf_prog_free(fp_old);
    253 	}
    254 
    255 	return fp;
    256 }
    257 
    258 void __bpf_prog_free(struct bpf_prog *fp)
    259 {
    260 	if (fp->aux) {
    261 		free_percpu(fp->aux->stats);
    262 		kfree(fp->aux);
    263 	}
    264 	vfree(fp);
    265 }
    266 
    267 int bpf_prog_calc_tag(struct bpf_prog *fp)
    268 {
    269 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
    270 	u32 raw_size = bpf_prog_tag_scratch_size(fp);
    271 	u32 digest[SHA_DIGEST_WORDS];
    272 	u32 ws[SHA_WORKSPACE_WORDS];
    273 	u32 i, bsize, psize, blocks;
    274 	struct bpf_insn *dst;
    275 	bool was_ld_map;
    276 	u8 *raw, *todo;
    277 	__be32 *result;
    278 	__be64 *bits;
    279 
    280 	raw = vmalloc(raw_size);
    281 	if (!raw)
    282 		return -ENOMEM;
    283 
    284 	sha_init(digest);
    285 	memset(ws, 0, sizeof(ws));
    286 
    287 	/* We need to take out the map fds for the digest calculation
    288 	 * since they are unstable from the user space side.
    289 	 */
    290 	dst = (void *)raw;
    291 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
    292 		dst[i] = fp->insnsi[i];
    293 		if (!was_ld_map &&
    294 		    dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) &&
    295 		    dst[i].src_reg == BPF_PSEUDO_MAP_FD) {
    296 			was_ld_map = true;
    297 			dst[i].imm = 0;
    298 		} else if (was_ld_map &&
    299 			   dst[i].code == 0 &&
    300 			   dst[i].dst_reg == 0 &&
    301 			   dst[i].src_reg == 0 &&
    302 			   dst[i].off == 0) {
    303 			was_ld_map = false;
    304 			dst[i].imm = 0;
    305 		} else {
    306 			was_ld_map = false;
    307 		}
    308 	}
    309 
    310 	psize = bpf_prog_insn_size(fp);
    311 	memset(&raw[psize], 0, raw_size - psize);
    312 	raw[psize++] = 0x80;
    313 
    314 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
    315 	blocks = bsize / SHA_MESSAGE_BYTES;
    316 	todo   = raw;
    317 	if (bsize - psize >= sizeof(__be64)) {
    318 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
    319 	} else {
    320 		bits = (__be64 *)(todo + bsize + bits_offset);
    321 		blocks++;
    322 	}
    323 	*bits = cpu_to_be64((psize - 1) << 3);
    324 
    325 	while (blocks--) {
    326 		sha_transform(digest, todo, ws);
    327 		todo += SHA_MESSAGE_BYTES;
    328 	}
    329 
    330 	result = (__force __be32 *)digest;
    331 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
    332 		result[i] = cpu_to_be32(digest[i]);
    333 	memcpy(fp->tag, result, sizeof(fp->tag));
    334 
    335 	vfree(raw);
    336 	return 0;
    337 }
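
        /* Editor's note, not part of the upstream file: the padding above
         * follows the standard SHA-1 message layout. For a hypothetical
         * two-insn program, psize == bpf_prog_insn_size(fp) == 16; storing
         * raw[16] = 0x80 bumps psize to 17, bsize rounds up to
         * SHA_MESSAGE_BYTES (64), and the big-endian bit length
         * (psize - 1) << 3 == 128 lands in the last 8 bytes of the block
         * before the blocks are fed through sha_transform().
         */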
    338 
    339 static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old,
    340 				s32 end_new, u32 curr, const bool probe_pass)
    341 {
    342 	const s64 imm_min = S32_MIN, imm_max = S32_MAX;
    343 	s32 delta = end_new - end_old;
    344 	s64 imm = insn->imm;
    345 
    346 	if (curr < pos && curr + imm + 1 >= end_old)
    347 		imm += delta;
    348 	else if (curr >= end_new && curr + imm + 1 < end_new)
    349 		imm -= delta;
    350 	if (imm < imm_min || imm > imm_max)
    351 		return -ERANGE;
    352 	if (!probe_pass)
    353 		insn->imm = imm;
    354 	return 0;
    355 }
    356 
    357 static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old,
    358 				s32 end_new, u32 curr, const bool probe_pass)
    359 {
    360 	const s32 off_min = S16_MIN, off_max = S16_MAX;
    361 	s32 delta = end_new - end_old;
    362 	s32 off = insn->off;
    363 
    364 	if (curr < pos && curr + off + 1 >= end_old)
    365 		off += delta;
    366 	else if (curr >= end_new && curr + off + 1 < end_new)
    367 		off -= delta;
    368 	if (off < off_min || off > off_max)
    369 		return -ERANGE;
    370 	if (!probe_pass)
    371 		insn->off = off;
    372 	return 0;
    373 }
    374 
    375 static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old,
    376 			    s32 end_new, const bool probe_pass)
    377 {
    378 	u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0);
    379 	struct bpf_insn *insn = prog->insnsi;
    380 	int ret = 0;
    381 
    382 	for (i = 0; i < insn_cnt; i++, insn++) {
    383 		u8 code;
    384 
    385 		/* In the probing pass we still operate on the original,
    386 		 * unpatched image in order to check overflows before we
    387 		 * do any other adjustments. Therefore skip the patchlet.
    388 		 */
    389 		if (probe_pass && i == pos) {
    390 			i = end_new;
    391 			insn = prog->insnsi + end_old;
    392 		}
    393 		code = insn->code;
    394 		if ((BPF_CLASS(code) != BPF_JMP &&
    395 		     BPF_CLASS(code) != BPF_JMP32) ||
    396 		    BPF_OP(code) == BPF_EXIT)
    397 			continue;
    398 		/* Adjust offset of jmps if we cross patch boundaries. */
    399 		if (BPF_OP(code) == BPF_CALL) {
    400 			if (insn->src_reg != BPF_PSEUDO_CALL)
    401 				continue;
    402 			ret = bpf_adj_delta_to_imm(insn, pos, end_old,
    403 						   end_new, i, probe_pass);
    404 		} else {
    405 			ret = bpf_adj_delta_to_off(insn, pos, end_old,
    406 						   end_new, i, probe_pass);
    407 		}
    408 		if (ret)
    409 			break;
    410 	}
    411 
    412 	return ret;
    413 }
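
        /* Editor's note, not part of the upstream file: a worked example
         * with made-up numbers. If a patchlet at pos == 3 grows one insn
         * into four (end_old == 4, end_new == 7, delta == 3), a JMP at
         * curr == 1 with off == 4 originally targets insn 6
         * (curr + off + 1 >= end_old), so its offset is bumped to 7, while
         * a jump that lands before the patchlet keeps its offset as is.
         */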
    414 
    415 static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
    416 {
    417 	struct bpf_line_info *linfo;
    418 	u32 i, nr_linfo;
    419 
    420 	nr_linfo = prog->aux->nr_linfo;
    421 	if (!nr_linfo || !delta)
    422 		return;
    423 
    424 	linfo = prog->aux->linfo;
    425 
    426 	for (i = 0; i < nr_linfo; i++)
    427 		if (off < linfo[i].insn_off)
    428 			break;
    429 
    430 	/* Push all off < linfo[i].insn_off by delta */
    431 	for (; i < nr_linfo; i++)
    432 		linfo[i].insn_off += delta;
    433 }
    434 
    435 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
    436 				       const struct bpf_insn *patch, u32 len)
    437 {
    438 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
    439 	const u32 cnt_max = S16_MAX;
    440 	struct bpf_prog *prog_adj;
    441 
    442 	/* Since our patchlet doesn't expand the image, we're done. */
    443 	if (insn_delta == 0) {
    444 		memcpy(prog->insnsi + off, patch, sizeof(*patch));
    445 		return prog;
    446 	}
    447 
    448 	insn_adj_cnt = prog->len + insn_delta;
    449 
    450 	/* Reject anything that would potentially let the insn->off
    451 	 * target overflow when we have excessive program expansions.
    452 	 * We need to probe here before we do any reallocation where
    453 	 * we afterwards may not fail anymore.
    454 	 */
    455 	if (insn_adj_cnt > cnt_max &&
    456 	    bpf_adj_branches(prog, off, off + 1, off + len, true))
    457 		return NULL;
    458 
    459 	/* Several new instructions need to be inserted. Make room
    460 	 * for them. Likely, there's no need for a new allocation as
    461 	 * the last page could have large enough tailroom.
    462 	 */
    463 	prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt),
    464 				    GFP_USER);
    465 	if (!prog_adj)
    466 		return NULL;
    467 
    468 	prog_adj->len = insn_adj_cnt;
    469 
    470 	/* Patching happens in 3 steps:
    471 	 *
    472 	 * 1) Move over tail of insnsi from next instruction onwards,
    473 	 *    so we can patch the single target insn with one or more
    474 	 *    new ones (patching is always from 1 to n insns, n > 0).
    475 	 * 2) Inject new instructions at the target location.
    476 	 * 3) Adjust branch offsets if necessary.
    477 	 */
    478 	insn_rest = insn_adj_cnt - off - len;
    479 
    480 	memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1,
    481 		sizeof(*patch) * insn_rest);
    482 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
    483 
    484 	/* We are guaranteed not to fail at this point; otherwise
    485 	 * the ship has sailed and there is no way back to the
    486 	 * original state. An overflow cannot happen at this point.
    487 	 */
    488 	BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false));
    489 
    490 	bpf_adj_linfo(prog_adj, off, insn_delta);
    491 
    492 	return prog_adj;
    493 }
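
        /* Editor's note, not part of the upstream file: the arithmetic
         * above with made-up numbers. For prog->len == 10, off == 3 and a
         * patch of len == 4: insn_delta == 3, insn_adj_cnt == 13 and
         * insn_rest == 6, so the memmove() shifts the original insns 4..9
         * up into slots 7..12 and the memcpy() drops the four patch insns
         * into slots 3..6.
         */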
    494 
    495 int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
    496 {
    497 	/* Branch offsets can't overflow when program is shrinking, no need
    498 	 * to call bpf_adj_branches(..., true) here
    499 	 */
    500 	memmove(prog->insnsi + off, prog->insnsi + off + cnt,
    501 		sizeof(struct bpf_insn) * (prog->len - off - cnt));
    502 	prog->len -= cnt;
    503 
    504 	return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
    505 }
    506 
    507 void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
    508 {
    509 	int i;
    510 
    511 	for (i = 0; i < fp->aux->func_cnt; i++)
    512 		bpf_prog_kallsyms_del(fp->aux->func[i]);
    513 }
    514 
    515 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
    516 {
    517 	bpf_prog_kallsyms_del_subprogs(fp);
    518 	bpf_prog_kallsyms_del(fp);
    519 }
    520 
    521 #ifdef CONFIG_BPF_JIT
    522 /* All BPF JIT sysctl knobs here. */
    523 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
    524 int bpf_jit_harden   __read_mostly;
    525 int bpf_jit_kallsyms __read_mostly;
    526 long bpf_jit_limit   __read_mostly;
    527 
    528 static __always_inline void
    529 bpf_get_prog_addr_region(const struct bpf_prog *prog,
    530 			 unsigned long *symbol_start,
    531 			 unsigned long *symbol_end)
    532 {
    533 	const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
    534 	unsigned long addr = (unsigned long)hdr;
    535 
    536 	WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
    537 
    538 	*symbol_start = addr;
    539 	*symbol_end   = addr + hdr->pages * PAGE_SIZE;
    540 }
    541 
    542 void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
    543 {
    544 	const char *end = sym + KSYM_NAME_LEN;
    545 	const struct btf_type *type;
    546 	const char *func_name;
    547 
    548 	BUILD_BUG_ON(sizeof("bpf_prog_") +
    549 		     sizeof(prog->tag) * 2 +
    550 		     /* name has been null terminated.
    551 		      * We would normally need +1 for the '_' preceding
    552 		      * the name.  However, the null character
    553 		      * is double counted between the name and the
    554 		      * sizeof("bpf_prog_") above, so we omit
    555 		      * the +1 here.
    556 		      */
    557 		     sizeof(prog->aux->name) > KSYM_NAME_LEN);
    558 
    559 	sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
    560 	sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
    561 
    562 	/* prog->aux->name will be ignored if full btf name is available */
    563 	if (prog->aux->func_info_cnt) {
    564 		type = btf_type_by_id(prog->aux->btf,
    565 				      prog->aux->func_info[prog->aux->func_idx].type_id);
    566 		func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
    567 		snprintf(sym, (size_t)(end - sym), "_%s", func_name);
    568 		return;
    569 	}
    570 
    571 	if (prog->aux->name[0])
    572 		snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
    573 	else
    574 		*sym = 0;
    575 }
    576 
    577 static __always_inline unsigned long
    578 bpf_get_prog_addr_start(struct latch_tree_node *n)
    579 {
    580 	unsigned long symbol_start, symbol_end;
    581 	const struct bpf_prog_aux *aux;
    582 
    583 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
    584 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
    585 
    586 	return symbol_start;
    587 }
    588 
    589 static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
    590 					  struct latch_tree_node *b)
    591 {
    592 	return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
    593 }
    594 
    595 static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
    596 {
    597 	unsigned long val = (unsigned long)key;
    598 	unsigned long symbol_start, symbol_end;
    599 	const struct bpf_prog_aux *aux;
    600 
    601 	aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
    602 	bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
    603 
    604 	if (val < symbol_start)
    605 		return -1;
    606 	if (val >= symbol_end)
    607 		return  1;
    608 
    609 	return 0;
    610 }
    611 
    612 static const struct latch_tree_ops bpf_tree_ops = {
    613 	.less	= bpf_tree_less,
    614 	.comp	= bpf_tree_comp,
    615 };
    616 
    617 static DEFINE_SPINLOCK(bpf_lock);
    618 static LIST_HEAD(bpf_kallsyms);
    619 static struct latch_tree_root bpf_tree __cacheline_aligned;
    620 
    621 static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
    622 {
    623 	WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
    624 	list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
    625 	latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
    626 }
    627 
    628 static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
    629 {
    630 	if (list_empty(&aux->ksym_lnode))
    631 		return;
    632 
    633 	latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
    634 	list_del_rcu(&aux->ksym_lnode);
    635 }
    636 
    637 static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
    638 {
    639 	return fp->jited && !bpf_prog_was_classic(fp);
    640 }
    641 
    642 static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
    643 {
    644 	return list_empty(&fp->aux->ksym_lnode) ||
    645 	       fp->aux->ksym_lnode.prev == LIST_POISON2;
    646 }
    647 
    648 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
    649 {
    650 	if (!bpf_prog_kallsyms_candidate(fp) ||
    651 	    !capable(CAP_SYS_ADMIN))
    652 		return;
    653 
    654 	spin_lock_bh(&bpf_lock);
    655 	bpf_prog_ksym_node_add(fp->aux);
    656 	spin_unlock_bh(&bpf_lock);
    657 }
    658 
    659 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
    660 {
    661 	if (!bpf_prog_kallsyms_candidate(fp))
    662 		return;
    663 
    664 	spin_lock_bh(&bpf_lock);
    665 	bpf_prog_ksym_node_del(fp->aux);
    666 	spin_unlock_bh(&bpf_lock);
    667 }
    668 
    669 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
    670 {
    671 	struct latch_tree_node *n;
    672 
    673 	if (!bpf_jit_kallsyms_enabled())
    674 		return NULL;
    675 
    676 	n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
    677 	return n ?
    678 	       container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
    679 	       NULL;
    680 }
    681 
    682 const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
    683 				 unsigned long *off, char *sym)
    684 {
    685 	unsigned long symbol_start, symbol_end;
    686 	struct bpf_prog *prog;
    687 	char *ret = NULL;
    688 
    689 	rcu_read_lock();
    690 	prog = bpf_prog_kallsyms_find(addr);
    691 	if (prog) {
    692 		bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
    693 		bpf_get_prog_name(prog, sym);
    694 
    695 		ret = sym;
    696 		if (size)
    697 			*size = symbol_end - symbol_start;
    698 		if (off)
    699 			*off  = addr - symbol_start;
    700 	}
    701 	rcu_read_unlock();
    702 
    703 	return ret;
    704 }
    705 
    706 bool is_bpf_text_address(unsigned long addr)
    707 {
    708 	bool ret;
    709 
    710 	rcu_read_lock();
    711 	ret = bpf_prog_kallsyms_find(addr) != NULL;
    712 	rcu_read_unlock();
    713 
    714 	return ret;
    715 }
    716 
    717 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
    718 		    char *sym)
    719 {
    720 	struct bpf_prog_aux *aux;
    721 	unsigned int it = 0;
    722 	int ret = -ERANGE;
    723 
    724 	if (!bpf_jit_kallsyms_enabled())
    725 		return ret;
    726 
    727 	rcu_read_lock();
    728 	list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
    729 		if (it++ != symnum)
    730 			continue;
    731 
    732 		bpf_get_prog_name(aux->prog, sym);
    733 
    734 		*value = (unsigned long)aux->prog->bpf_func;
    735 		*type  = BPF_SYM_ELF_TYPE;
    736 
    737 		ret = 0;
    738 		break;
    739 	}
    740 	rcu_read_unlock();
    741 
    742 	return ret;
    743 }
    744 
    745 static atomic_long_t bpf_jit_current;
    746 
    747 /* Can be overridden by an arch's JIT compiler if it has a custom,
    748  * dedicated BPF backend memory area, or if neither of the two
    749  * below apply.
    750  */
    751 u64 __weak bpf_jit_alloc_exec_limit(void)
    752 {
    753 #if defined(MODULES_VADDR)
    754 	return MODULES_END - MODULES_VADDR;
    755 #else
    756 	return VMALLOC_END - VMALLOC_START;
    757 #endif
    758 }
    759 
    760 static int __init bpf_jit_charge_init(void)
    761 {
    762 	/* Only used as heuristic here to derive limit. */
    763 	bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
    764 					    PAGE_SIZE), LONG_MAX);
    765 	return 0;
    766 }
    767 pure_initcall(bpf_jit_charge_init);
    768 
    769 static int bpf_jit_charge_modmem(u32 pages)
    770 {
    771 	if (atomic_long_add_return(pages, &bpf_jit_current) >
    772 	    (bpf_jit_limit >> PAGE_SHIFT)) {
    773 		if (!capable(CAP_SYS_ADMIN)) {
    774 			atomic_long_sub(pages, &bpf_jit_current);
    775 			return -EPERM;
    776 		}
    777 	}
    778 
    779 	return 0;
    780 }
    781 
    782 static void bpf_jit_uncharge_modmem(u32 pages)
    783 {
    784 	atomic_long_sub(pages, &bpf_jit_current);
    785 }
    786 
    787 void *__weak bpf_jit_alloc_exec(unsigned long size)
    788 {
    789 	return module_alloc(size);
    790 }
    791 
    792 void __weak bpf_jit_free_exec(void *addr)
    793 {
    794 	module_memfree(addr);
    795 }
    796 
    797 struct bpf_binary_header *
    798 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
    799 		     unsigned int alignment,
    800 		     bpf_jit_fill_hole_t bpf_fill_ill_insns)
    801 {
    802 	struct bpf_binary_header *hdr;
    803 	u32 size, hole, start, pages;
    804 
    805 	/* Most BPF filters are really small, but if some of them
    806 	 * fill a page, allow at least 128 extra bytes to insert a
    807 	 * random section of illegal instructions.
    808 	 */
    809 	size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
    810 	pages = size / PAGE_SIZE;
    811 
    812 	if (bpf_jit_charge_modmem(pages))
    813 		return NULL;
    814 	hdr = bpf_jit_alloc_exec(size);
    815 	if (!hdr) {
    816 		bpf_jit_uncharge_modmem(pages);
    817 		return NULL;
    818 	}
    819 
    820 	/* Fill space with illegal/arch-dep instructions. */
    821 	bpf_fill_ill_insns(hdr, size);
    822 
    823 	hdr->pages = pages;
    824 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
    825 		     PAGE_SIZE - sizeof(*hdr));
    826 	start = (get_random_int() % hole) & ~(alignment - 1);
    827 
    828 	/* Leave a random number of instructions before BPF code. */
    829 	*image_ptr = &hdr->image[start];
    830 
    831 	return hdr;
    832 }
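
        /* Editor's note, not part of the upstream file: a sizing sketch,
         * assuming PAGE_SIZE == 4096. For proglen == 200 the allocation
         * rounds 200 + sizeof(*hdr) + 128 up to a single page, so
         * pages == 1; hole is the spare room left once the header and
         * program are accounted for (capped to the first page), and the
         * image starts at a random, alignment-masked offset within that
         * hole so the JITed code does not sit at a predictable position
         * inside the region.
         */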
    833 
    834 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
    835 {
    836 	u32 pages = hdr->pages;
    837 
    838 	bpf_jit_free_exec(hdr);
    839 	bpf_jit_uncharge_modmem(pages);
    840 }
    841 
    842 /* This symbol is only overridden by archs that have different
    843  * requirements from the usual eBPF JITs, e.g. when they only
    844  * implement a cBPF JIT, do not set images read-only, etc.
    845  */
    846 void __weak bpf_jit_free(struct bpf_prog *fp)
    847 {
    848 	if (fp->jited) {
    849 		struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
    850 
    851 		bpf_jit_binary_unlock_ro(hdr);
    852 		bpf_jit_binary_free(hdr);
    853 
    854 		WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
    855 	}
    856 
    857 	bpf_prog_unlock_free(fp);
    858 }
    859 
    860 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
    861 			  const struct bpf_insn *insn, bool extra_pass,
    862 			  u64 *func_addr, bool *func_addr_fixed)
    863 {
    864 	s16 off = insn->off;
    865 	s32 imm = insn->imm;
    866 	u8 *addr;
    867 
    868 	*func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
    869 	if (!*func_addr_fixed) {
    870 		/* Place-holder address till the last pass has collected
    871 		 * all addresses for JITed subprograms in which case we
    872 		 * can pick them up from prog->aux.
    873 		 */
    874 		if (!extra_pass)
    875 			addr = NULL;
    876 		else if (prog->aux->func &&
    877 			 off >= 0 && off < prog->aux->func_cnt)
    878 			addr = (u8 *)prog->aux->func[off]->bpf_func;
    879 		else
    880 			return -EINVAL;
    881 	} else {
    882 		/* Address of a BPF helper call. Since part of the core
    883 		 * kernel, it's always at a fixed location. __bpf_call_base
    884 		 * and the helper with imm relative to it are both in core
    885 		 * kernel.
    886 		 */
    887 		addr = (u8 *)__bpf_call_base + imm;
    888 	}
    889 
    890 	*func_addr = (unsigned long)addr;
    891 	return 0;
    892 }
    893 
    894 static int bpf_jit_blind_insn(const struct bpf_insn *from,
    895 			      const struct bpf_insn *aux,
    896 			      struct bpf_insn *to_buff)
    897 {
    898 	struct bpf_insn *to = to_buff;
    899 	u32 imm_rnd = get_random_int();
    900 	s16 off;
    901 
    902 	BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
    903 	BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
    904 
    905 	/* Constraints on AX register:
    906 	 *
    907 	 * AX register is inaccessible from user space. It is mapped in
    908 	 * all JITs, and used here for constant blinding rewrites. It is
    909 	 * typically "stateless" meaning its contents are only valid within
    910 	 * the executed instruction, but not across several instructions.
    911 	 * There are a few exceptions however which are further detailed
    912 	 * below.
    913 	 *
    914 	 * Constant blinding is only used by JITs, not in the interpreter.
    915 	 * The interpreter uses AX in some occasions as a local temporary
    916 	 * register e.g. in DIV or MOD instructions.
    917 	 *
    918 	 * In restricted circumstances, the verifier can also use the AX
    919 	 * register for rewrites as long as they do not interfere with
    920 	 * the above cases!
    921 	 */
    922 	if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX)
    923 		goto out;
    924 
    925 	if (from->imm == 0 &&
    926 	    (from->code == (BPF_ALU   | BPF_MOV | BPF_K) ||
    927 	     from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) {
    928 		*to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg);
    929 		goto out;
    930 	}
    931 
    932 	switch (from->code) {
    933 	case BPF_ALU | BPF_ADD | BPF_K:
    934 	case BPF_ALU | BPF_SUB | BPF_K:
    935 	case BPF_ALU | BPF_AND | BPF_K:
    936 	case BPF_ALU | BPF_OR  | BPF_K:
    937 	case BPF_ALU | BPF_XOR | BPF_K:
    938 	case BPF_ALU | BPF_MUL | BPF_K:
    939 	case BPF_ALU | BPF_MOV | BPF_K:
    940 	case BPF_ALU | BPF_DIV | BPF_K:
    941 	case BPF_ALU | BPF_MOD | BPF_K:
    942 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
    943 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
    944 		*to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX);
    945 		break;
    946 
    947 	case BPF_ALU64 | BPF_ADD | BPF_K:
    948 	case BPF_ALU64 | BPF_SUB | BPF_K:
    949 	case BPF_ALU64 | BPF_AND | BPF_K:
    950 	case BPF_ALU64 | BPF_OR  | BPF_K:
    951 	case BPF_ALU64 | BPF_XOR | BPF_K:
    952 	case BPF_ALU64 | BPF_MUL | BPF_K:
    953 	case BPF_ALU64 | BPF_MOV | BPF_K:
    954 	case BPF_ALU64 | BPF_DIV | BPF_K:
    955 	case BPF_ALU64 | BPF_MOD | BPF_K:
    956 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
    957 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
    958 		*to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX);
    959 		break;
    960 
    961 	case BPF_JMP | BPF_JEQ  | BPF_K:
    962 	case BPF_JMP | BPF_JNE  | BPF_K:
    963 	case BPF_JMP | BPF_JGT  | BPF_K:
    964 	case BPF_JMP | BPF_JLT  | BPF_K:
    965 	case BPF_JMP | BPF_JGE  | BPF_K:
    966 	case BPF_JMP | BPF_JLE  | BPF_K:
    967 	case BPF_JMP | BPF_JSGT | BPF_K:
    968 	case BPF_JMP | BPF_JSLT | BPF_K:
    969 	case BPF_JMP | BPF_JSGE | BPF_K:
    970 	case BPF_JMP | BPF_JSLE | BPF_K:
    971 	case BPF_JMP | BPF_JSET | BPF_K:
    972 		/* Account for the extra offset in case of a backjump. */
    973 		off = from->off;
    974 		if (off < 0)
    975 			off -= 2;
    976 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
    977 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
    978 		*to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
    979 		break;
    980 
    981 	case BPF_JMP32 | BPF_JEQ  | BPF_K:
    982 	case BPF_JMP32 | BPF_JNE  | BPF_K:
    983 	case BPF_JMP32 | BPF_JGT  | BPF_K:
    984 	case BPF_JMP32 | BPF_JLT  | BPF_K:
    985 	case BPF_JMP32 | BPF_JGE  | BPF_K:
    986 	case BPF_JMP32 | BPF_JLE  | BPF_K:
    987 	case BPF_JMP32 | BPF_JSGT | BPF_K:
    988 	case BPF_JMP32 | BPF_JSLT | BPF_K:
    989 	case BPF_JMP32 | BPF_JSGE | BPF_K:
    990 	case BPF_JMP32 | BPF_JSLE | BPF_K:
    991 	case BPF_JMP32 | BPF_JSET | BPF_K:
    992 		/* Account for the extra offset in case of a backjump. */
    993 		off = from->off;
    994 		if (off < 0)
    995 			off -= 2;
    996 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
    997 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
    998 		*to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
    999 				      off);
   1000 		break;
   1001 
   1002 	case BPF_LD | BPF_IMM | BPF_DW:
   1003 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
   1004 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
   1005 		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
   1006 		*to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX);
   1007 		break;
   1008 	case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */
   1009 		*to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm);
   1010 		*to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
   1011 		*to++ = BPF_ALU64_REG(BPF_OR,  aux[0].dst_reg, BPF_REG_AX);
   1012 		break;
   1013 
   1014 	case BPF_ST | BPF_MEM | BPF_DW:
   1015 	case BPF_ST | BPF_MEM | BPF_W:
   1016 	case BPF_ST | BPF_MEM | BPF_H:
   1017 	case BPF_ST | BPF_MEM | BPF_B:
   1018 		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
   1019 		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
   1020 		*to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off);
   1021 		break;
   1022 	}
   1023 out:
   1024 	return to - to_buff;
   1025 }
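
        /* Editor's note, not part of the upstream file: an illustration of
         * the rewrite above with made-up values. For
         * BPF_ALU64 | BPF_ADD | BPF_K with imm == 42 and a random
         * imm_rnd == R, the equivalent emitted sequence is
         *
         *	BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, R ^ 42);
         *	BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, R);
         *	BPF_ALU64_REG(BPF_ADD, dst_reg, BPF_REG_AX);
         *
         * Since (R ^ 42) ^ R == 42 the result is unchanged, but the
         * literal constant never appears in the JITed image, which spoils
         * attempts to smuggle gadgets in attacker-chosen immediates.
         */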
   1026 
   1027 static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
   1028 					      gfp_t gfp_extra_flags)
   1029 {
   1030 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
   1031 	struct bpf_prog *fp;
   1032 
   1033 	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
   1034 	if (fp != NULL) {
   1035 		/* aux->prog still points to the fp_other one, so
   1036 		 * when promoting the clone to the real program,
   1037 		 * this still needs to be adapted.
   1038 		 */
   1039 		memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);
   1040 	}
   1041 
   1042 	return fp;
   1043 }
   1044 
   1045 static void bpf_prog_clone_free(struct bpf_prog *fp)
   1046 {
   1047 	/* aux was stolen by the other clone, so we cannot free
   1048 	 * it from this path! It will be freed eventually by the
   1049 	 * other program on release.
   1050 	 *
   1051 	 * At this point, we don't need a deferred release since
   1052 	 * clone is guaranteed to not be locked.
   1053 	 */
   1054 	fp->aux = NULL;
   1055 	__bpf_prog_free(fp);
   1056 }
   1057 
   1058 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
   1059 {
   1060 	/* We have to repoint aux->prog to self, as we don't
   1061 	 * know whether fp here is the clone or the original.
   1062 	 */
   1063 	fp->aux->prog = fp;
   1064 	bpf_prog_clone_free(fp_other);
   1065 }
   1066 
   1067 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
   1068 {
   1069 	struct bpf_insn insn_buff[16], aux[2];
   1070 	struct bpf_prog *clone, *tmp;
   1071 	int insn_delta, insn_cnt;
   1072 	struct bpf_insn *insn;
   1073 	int i, rewritten;
   1074 
   1075 	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
   1076 		return prog;
   1077 
   1078 	clone = bpf_prog_clone_create(prog, GFP_USER);
   1079 	if (!clone)
   1080 		return ERR_PTR(-ENOMEM);
   1081 
   1082 	insn_cnt = clone->len;
   1083 	insn = clone->insnsi;
   1084 
   1085 	for (i = 0; i < insn_cnt; i++, insn++) {
   1086 		/* We temporarily need to hold the original ld64 insn
   1087 		 * so that we can still access the first part in the
   1088 		 * second blinding run.
   1089 		 */
   1090 		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
   1091 		    insn[1].code == 0)
   1092 			memcpy(aux, insn, sizeof(aux));
   1093 
   1094 		rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
   1095 		if (!rewritten)
   1096 			continue;
   1097 
   1098 		tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
   1099 		if (!tmp) {
   1100 			/* Patching may have repointed aux->prog during
   1101 			 * realloc from the original one, so we need to
   1102 			 * fix it up here on error.
   1103 			 */
   1104 			bpf_jit_prog_release_other(prog, clone);
   1105 			return ERR_PTR(-ENOMEM);
   1106 		}
   1107 
   1108 		clone = tmp;
   1109 		insn_delta = rewritten - 1;
   1110 
   1111 		/* Walk new program and skip insns we just inserted. */
   1112 		insn = clone->insnsi + i + insn_delta;
   1113 		insn_cnt += insn_delta;
   1114 		i        += insn_delta;
   1115 	}
   1116 
   1117 	clone->blinded = 1;
   1118 	return clone;
   1119 }
   1120 #endif /* CONFIG_BPF_JIT */
   1121 
   1122 /* Base function for offset calculation. Needs to go into .text section,
   1123  * therefore keeping it non-static as well; will also be used by JITs
   1124  * anyway later on, so do not let the compiler omit it. This also needs
   1125  * to go into kallsyms for correlation from e.g. bpftool, so naming
   1126  * must not change.
   1127  */
   1128 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
   1129 {
   1130 	return 0;
   1131 }
   1132 EXPORT_SYMBOL_GPL(__bpf_call_base);
   1133 
   1134 /* All UAPI available opcodes. */
   1135 #define BPF_INSN_MAP(INSN_2, INSN_3)		\
   1136 	/* 32 bit ALU operations. */		\
   1137 	/*   Register based. */			\
   1138 	INSN_3(ALU, ADD,  X),			\
   1139 	INSN_3(ALU, SUB,  X),			\
   1140 	INSN_3(ALU, AND,  X),			\
   1141 	INSN_3(ALU, OR,   X),			\
   1142 	INSN_3(ALU, LSH,  X),			\
   1143 	INSN_3(ALU, RSH,  X),			\
   1144 	INSN_3(ALU, XOR,  X),			\
   1145 	INSN_3(ALU, MUL,  X),			\
   1146 	INSN_3(ALU, MOV,  X),			\
   1147 	INSN_3(ALU, ARSH, X),			\
   1148 	INSN_3(ALU, DIV,  X),			\
   1149 	INSN_3(ALU, MOD,  X),			\
   1150 	INSN_2(ALU, NEG),			\
   1151 	INSN_3(ALU, END, TO_BE),		\
   1152 	INSN_3(ALU, END, TO_LE),		\
   1153 	/*   Immediate based. */		\
   1154 	INSN_3(ALU, ADD,  K),			\
   1155 	INSN_3(ALU, SUB,  K),			\
   1156 	INSN_3(ALU, AND,  K),			\
   1157 	INSN_3(ALU, OR,   K),			\
   1158 	INSN_3(ALU, LSH,  K),			\
   1159 	INSN_3(ALU, RSH,  K),			\
   1160 	INSN_3(ALU, XOR,  K),			\
   1161 	INSN_3(ALU, MUL,  K),			\
   1162 	INSN_3(ALU, MOV,  K),			\
   1163 	INSN_3(ALU, ARSH, K),			\
   1164 	INSN_3(ALU, DIV,  K),			\
   1165 	INSN_3(ALU, MOD,  K),			\
   1166 	/* 64 bit ALU operations. */		\
   1167 	/*   Register based. */			\
   1168 	INSN_3(ALU64, ADD,  X),			\
   1169 	INSN_3(ALU64, SUB,  X),			\
   1170 	INSN_3(ALU64, AND,  X),			\
   1171 	INSN_3(ALU64, OR,   X),			\
   1172 	INSN_3(ALU64, LSH,  X),			\
   1173 	INSN_3(ALU64, RSH,  X),			\
   1174 	INSN_3(ALU64, XOR,  X),			\
   1175 	INSN_3(ALU64, MUL,  X),			\
   1176 	INSN_3(ALU64, MOV,  X),			\
   1177 	INSN_3(ALU64, ARSH, X),			\
   1178 	INSN_3(ALU64, DIV,  X),			\
   1179 	INSN_3(ALU64, MOD,  X),			\
   1180 	INSN_2(ALU64, NEG),			\
   1181 	/*   Immediate based. */		\
   1182 	INSN_3(ALU64, ADD,  K),			\
   1183 	INSN_3(ALU64, SUB,  K),			\
   1184 	INSN_3(ALU64, AND,  K),			\
   1185 	INSN_3(ALU64, OR,   K),			\
   1186 	INSN_3(ALU64, LSH,  K),			\
   1187 	INSN_3(ALU64, RSH,  K),			\
   1188 	INSN_3(ALU64, XOR,  K),			\
   1189 	INSN_3(ALU64, MUL,  K),			\
   1190 	INSN_3(ALU64, MOV,  K),			\
   1191 	INSN_3(ALU64, ARSH, K),			\
   1192 	INSN_3(ALU64, DIV,  K),			\
   1193 	INSN_3(ALU64, MOD,  K),			\
   1194 	/* Call instruction. */			\
   1195 	INSN_2(JMP, CALL),			\
   1196 	/* Exit instruction. */			\
   1197 	INSN_2(JMP, EXIT),			\
   1198 	/* 32-bit Jump instructions. */		\
   1199 	/*   Register based. */			\
   1200 	INSN_3(JMP32, JEQ,  X),			\
   1201 	INSN_3(JMP32, JNE,  X),			\
   1202 	INSN_3(JMP32, JGT,  X),			\
   1203 	INSN_3(JMP32, JLT,  X),			\
   1204 	INSN_3(JMP32, JGE,  X),			\
   1205 	INSN_3(JMP32, JLE,  X),			\
   1206 	INSN_3(JMP32, JSGT, X),			\
   1207 	INSN_3(JMP32, JSLT, X),			\
   1208 	INSN_3(JMP32, JSGE, X),			\
   1209 	INSN_3(JMP32, JSLE, X),			\
   1210 	INSN_3(JMP32, JSET, X),			\
   1211 	/*   Immediate based. */		\
   1212 	INSN_3(JMP32, JEQ,  K),			\
   1213 	INSN_3(JMP32, JNE,  K),			\
   1214 	INSN_3(JMP32, JGT,  K),			\
   1215 	INSN_3(JMP32, JLT,  K),			\
   1216 	INSN_3(JMP32, JGE,  K),			\
   1217 	INSN_3(JMP32, JLE,  K),			\
   1218 	INSN_3(JMP32, JSGT, K),			\
   1219 	INSN_3(JMP32, JSLT, K),			\
   1220 	INSN_3(JMP32, JSGE, K),			\
   1221 	INSN_3(JMP32, JSLE, K),			\
   1222 	INSN_3(JMP32, JSET, K),			\
   1223 	/* Jump instructions. */		\
   1224 	/*   Register based. */			\
   1225 	INSN_3(JMP, JEQ,  X),			\
   1226 	INSN_3(JMP, JNE,  X),			\
   1227 	INSN_3(JMP, JGT,  X),			\
   1228 	INSN_3(JMP, JLT,  X),			\
   1229 	INSN_3(JMP, JGE,  X),			\
   1230 	INSN_3(JMP, JLE,  X),			\
   1231 	INSN_3(JMP, JSGT, X),			\
   1232 	INSN_3(JMP, JSLT, X),			\
   1233 	INSN_3(JMP, JSGE, X),			\
   1234 	INSN_3(JMP, JSLE, X),			\
   1235 	INSN_3(JMP, JSET, X),			\
   1236 	/*   Immediate based. */		\
   1237 	INSN_3(JMP, JEQ,  K),			\
   1238 	INSN_3(JMP, JNE,  K),			\
   1239 	INSN_3(JMP, JGT,  K),			\
   1240 	INSN_3(JMP, JLT,  K),			\
   1241 	INSN_3(JMP, JGE,  K),			\
   1242 	INSN_3(JMP, JLE,  K),			\
   1243 	INSN_3(JMP, JSGT, K),			\
   1244 	INSN_3(JMP, JSLT, K),			\
   1245 	INSN_3(JMP, JSGE, K),			\
   1246 	INSN_3(JMP, JSLE, K),			\
   1247 	INSN_3(JMP, JSET, K),			\
   1248 	INSN_2(JMP, JA),			\
   1249 	/* Store instructions. */		\
   1250 	/*   Register based. */			\
   1251 	INSN_3(STX, MEM,  B),			\
   1252 	INSN_3(STX, MEM,  H),			\
   1253 	INSN_3(STX, MEM,  W),			\
   1254 	INSN_3(STX, MEM,  DW),			\
   1255 	INSN_3(STX, XADD, W),			\
   1256 	INSN_3(STX, XADD, DW),			\
   1257 	/*   Immediate based. */		\
   1258 	INSN_3(ST, MEM, B),			\
   1259 	INSN_3(ST, MEM, H),			\
   1260 	INSN_3(ST, MEM, W),			\
   1261 	INSN_3(ST, MEM, DW),			\
   1262 	/* Load instructions. */		\
   1263 	/*   Register based. */			\
   1264 	INSN_3(LDX, MEM, B),			\
   1265 	INSN_3(LDX, MEM, H),			\
   1266 	INSN_3(LDX, MEM, W),			\
   1267 	INSN_3(LDX, MEM, DW),			\
   1268 	/*   Immediate based. */		\
   1269 	INSN_3(LD, IMM, DW)
   1270 
   1271 bool bpf_opcode_in_insntable(u8 code)
   1272 {
   1273 #define BPF_INSN_2_TBL(x, y)    [BPF_##x | BPF_##y] = true
   1274 #define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true
   1275 	static const bool public_insntable[256] = {
   1276 		[0 ... 255] = false,
   1277 		/* Now overwrite non-defaults ... */
   1278 		BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
   1279 		/* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
   1280 		[BPF_LD | BPF_ABS | BPF_B] = true,
   1281 		[BPF_LD | BPF_ABS | BPF_H] = true,
   1282 		[BPF_LD | BPF_ABS | BPF_W] = true,
   1283 		[BPF_LD | BPF_IND | BPF_B] = true,
   1284 		[BPF_LD | BPF_IND | BPF_H] = true,
   1285 		[BPF_LD | BPF_IND | BPF_W] = true,
   1286 	};
   1287 #undef BPF_INSN_3_TBL
   1288 #undef BPF_INSN_2_TBL
   1289 	return public_insntable[code];
   1290 }
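
        /* Editor's note, not part of the upstream file: how the table
         * above is built. BPF_INSN_MAP() is an X-macro; with
         * BPF_INSN_3_TBL plugged in, an entry such as INSN_3(ALU, ADD, X)
         * expands to
         *
         *	[BPF_ALU | BPF_ADD | BPF_X] = true,
         *
         * so every opcode listed in BPF_INSN_MAP() gets its slot in
         * public_insntable[] set while everything else stays false.
         */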
   1291 
   1292 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
   1293 /**
   1294  *	__bpf_prog_run - run eBPF program on a given context
   1295  *	@regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers
   1296  *	@insn: is the array of eBPF instructions
   1297  *	@stack: is the eBPF storage stack
   1298  *
   1299  * Decode and execute eBPF instructions.
   1300  */
   1301 static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
   1302 {
   1303 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
   1304 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
   1305 	static const void *jumptable[256] = {
   1306 		[0 ... 255] = &&default_label,
   1307 		/* Now overwrite non-defaults ... */
   1308 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
   1309 		/* Non-UAPI available opcodes. */
   1310 		[BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS,
   1311 		[BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL,
   1312 	};
   1313 #undef BPF_INSN_3_LBL
   1314 #undef BPF_INSN_2_LBL
   1315 	u32 tail_call_cnt = 0;
   1316 
   1317 #define CONT	 ({ insn++; goto select_insn; })
   1318 #define CONT_JMP ({ insn++; goto select_insn; })
   1319 
   1320 select_insn:
   1321 	goto *jumptable[insn->code];
   1322 
   1323 	/* ALU */
   1324 #define ALU(OPCODE, OP)			\
   1325 	ALU64_##OPCODE##_X:		\
   1326 		DST = DST OP SRC;	\
   1327 		CONT;			\
   1328 	ALU_##OPCODE##_X:		\
   1329 		DST = (u32) DST OP (u32) SRC;	\
   1330 		CONT;			\
   1331 	ALU64_##OPCODE##_K:		\
   1332 		DST = DST OP IMM;		\
   1333 		CONT;			\
   1334 	ALU_##OPCODE##_K:		\
   1335 		DST = (u32) DST OP (u32) IMM;	\
   1336 		CONT;
   1337 
   1338 	ALU(ADD,  +)
   1339 	ALU(SUB,  -)
   1340 	ALU(AND,  &)
   1341 	ALU(OR,   |)
   1342 	ALU(LSH, <<)
   1343 	ALU(RSH, >>)
   1344 	ALU(XOR,  ^)
   1345 	ALU(MUL,  *)
   1346 #undef ALU
   1347 	ALU_NEG:
   1348 		DST = (u32) -DST;
   1349 		CONT;
   1350 	ALU64_NEG:
   1351 		DST = -DST;
   1352 		CONT;
   1353 	ALU_MOV_X:
   1354 		DST = (u32) SRC;
   1355 		CONT;
   1356 	ALU_MOV_K:
   1357 		DST = (u32) IMM;
   1358 		CONT;
   1359 	ALU64_MOV_X:
   1360 		DST = SRC;
   1361 		CONT;
   1362 	ALU64_MOV_K:
   1363 		DST = IMM;
   1364 		CONT;
   1365 	LD_IMM_DW:
   1366 		DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
   1367 		insn++;
   1368 		CONT;
   1369 	ALU_ARSH_X:
   1370 		DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
   1371 		CONT;
   1372 	ALU_ARSH_K:
   1373 		DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
   1374 		CONT;
   1375 	ALU64_ARSH_X:
   1376 		(*(s64 *) &DST) >>= SRC;
   1377 		CONT;
   1378 	ALU64_ARSH_K:
   1379 		(*(s64 *) &DST) >>= IMM;
   1380 		CONT;
   1381 	ALU64_MOD_X:
   1382 		div64_u64_rem(DST, SRC, &AX);
   1383 		DST = AX;
   1384 		CONT;
   1385 	ALU_MOD_X:
   1386 		AX = (u32) DST;
   1387 		DST = do_div(AX, (u32) SRC);
   1388 		CONT;
   1389 	ALU64_MOD_K:
   1390 		div64_u64_rem(DST, IMM, &AX);
   1391 		DST = AX;
   1392 		CONT;
   1393 	ALU_MOD_K:
   1394 		AX = (u32) DST;
   1395 		DST = do_div(AX, (u32) IMM);
   1396 		CONT;
   1397 	ALU64_DIV_X:
   1398 		DST = div64_u64(DST, SRC);
   1399 		CONT;
   1400 	ALU_DIV_X:
   1401 		AX = (u32) DST;
   1402 		do_div(AX, (u32) SRC);
   1403 		DST = (u32) AX;
   1404 		CONT;
   1405 	ALU64_DIV_K:
   1406 		DST = div64_u64(DST, IMM);
   1407 		CONT;
   1408 	ALU_DIV_K:
   1409 		AX = (u32) DST;
   1410 		do_div(AX, (u32) IMM);
   1411 		DST = (u32) AX;
   1412 		CONT;
   1413 	ALU_END_TO_BE:
   1414 		switch (IMM) {
   1415 		case 16:
   1416 			DST = (__force u16) cpu_to_be16(DST);
   1417 			break;
   1418 		case 32:
   1419 			DST = (__force u32) cpu_to_be32(DST);
   1420 			break;
   1421 		case 64:
   1422 			DST = (__force u64) cpu_to_be64(DST);
   1423 			break;
   1424 		}
   1425 		CONT;
   1426 	ALU_END_TO_LE:
   1427 		switch (IMM) {
   1428 		case 16:
   1429 			DST = (__force u16) cpu_to_le16(DST);
   1430 			break;
   1431 		case 32:
   1432 			DST = (__force u32) cpu_to_le32(DST);
   1433 			break;
   1434 		case 64:
   1435 			DST = (__force u64) cpu_to_le64(DST);
   1436 			break;
   1437 		}
   1438 		CONT;
   1439 
   1440 	/* CALL */
   1441 	JMP_CALL:
   1442 		/* Function call scratches BPF_R1-BPF_R5 registers,
   1443 		 * preserves BPF_R6-BPF_R9, and stores return value
   1444 		 * into BPF_R0.
   1445 		 */
   1446 		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
   1447 						       BPF_R4, BPF_R5);
   1448 		CONT;
   1449 
   1450 	JMP_CALL_ARGS:
   1451 		BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2,
   1452 							    BPF_R3, BPF_R4,
   1453 							    BPF_R5,
   1454 							    insn + insn->off + 1);
   1455 		CONT;
   1456 
   1457 	JMP_TAIL_CALL: {
   1458 		struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
   1459 		struct bpf_array *array = container_of(map, struct bpf_array, map);
   1460 		struct bpf_prog *prog;
   1461 		u32 index = BPF_R3;
   1462 
   1463 		if (unlikely(index >= array->map.max_entries))
   1464 			goto out;
   1465 		if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT))
   1466 			goto out;
   1467 
   1468 		tail_call_cnt++;
   1469 
   1470 		prog = READ_ONCE(array->ptrs[index]);
   1471 		if (!prog)
   1472 			goto out;
   1473 
   1474 		/* ARG1 at this point is guaranteed to point to CTX from
   1475 		 * the verifier side due to the fact that the tail call is
   1476 		 * handled like a helper, that is, bpf_tail_call_proto,
   1477 		 * where arg1_type is ARG_PTR_TO_CTX.
   1478 		 */
   1479 		insn = prog->insnsi;
   1480 		goto select_insn;
   1481 out:
   1482 		CONT;
   1483 	}
   1484 	JMP_JA:
   1485 		insn += insn->off;
   1486 		CONT;
   1487 	JMP_EXIT:
   1488 		return BPF_R0;
   1489 	/* JMP */
   1490 #define COND_JMP(SIGN, OPCODE, CMP_OP)				\
   1491 	JMP_##OPCODE##_X:					\
   1492 		if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) {	\
   1493 			insn += insn->off;			\
   1494 			CONT_JMP;				\
   1495 		}						\
   1496 		CONT;						\
   1497 	JMP32_##OPCODE##_X:					\
   1498 		if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) {	\
   1499 			insn += insn->off;			\
   1500 			CONT_JMP;				\
   1501 		}						\
   1502 		CONT;						\
   1503 	JMP_##OPCODE##_K:					\
   1504 		if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) {	\
   1505 			insn += insn->off;			\
   1506 			CONT_JMP;				\
   1507 		}						\
   1508 		CONT;						\
   1509 	JMP32_##OPCODE##_K:					\
   1510 		if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) {	\
   1511 			insn += insn->off;			\
   1512 			CONT_JMP;				\
   1513 		}						\
   1514 		CONT;
   1515 	COND_JMP(u, JEQ, ==)
   1516 	COND_JMP(u, JNE, !=)
   1517 	COND_JMP(u, JGT, >)
   1518 	COND_JMP(u, JLT, <)
   1519 	COND_JMP(u, JGE, >=)
   1520 	COND_JMP(u, JLE, <=)
   1521 	COND_JMP(u, JSET, &)
   1522 	COND_JMP(s, JSGT, >)
   1523 	COND_JMP(s, JSLT, <)
   1524 	COND_JMP(s, JSGE, >=)
   1525 	COND_JMP(s, JSLE, <=)
   1526 #undef COND_JMP
   1527 	/* STX and ST and LDX*/
   1528 #define LDST(SIZEOP, SIZE)						\
   1529 	STX_MEM_##SIZEOP:						\
   1530 		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
   1531 		CONT;							\
   1532 	ST_MEM_##SIZEOP:						\
   1533 		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
   1534 		CONT;							\
   1535 	LDX_MEM_##SIZEOP:						\
   1536 		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
   1537 		CONT;
   1538 
   1539 	LDST(B,   u8)
   1540 	LDST(H,  u16)
   1541 	LDST(W,  u32)
   1542 	LDST(DW, u64)
   1543 #undef LDST
   1544 	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
   1545 		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
   1546 			   (DST + insn->off));
   1547 		CONT;
   1548 	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
   1549 		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
   1550 			     (DST + insn->off));
   1551 		CONT;
   1552 
   1553 	default_label:
   1554 		/* If we ever reach this, we have a bug somewhere. Die hard here
   1555 		 * instead of just returning 0; we could be somewhere in a subprog,
   1556 		 * so execution could continue otherwise which we do /not/ want.
   1557 		 *
   1558 		 * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable().
   1559 		 */
   1560 		pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code);
   1561 		BUG_ON(1);
   1562 		return 0;
   1563 }
   1564 STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
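
        /* Editor's note, not part of the upstream file: the interpreter
         * above dispatches via computed goto. The same BPF_INSN_MAP()
         * X-macro fills jumptable[] with label addresses, so an insn with
         * code == (BPF_ALU64 | BPF_ADD | BPF_X) jumps straight to the
         * ALU64_ADD_X label generated by the ALU() macro, executes
         * DST = DST + SRC, and then CONT (insn++; goto select_insn) picks
         * up the next insn.
         */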
   1565 
   1566 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
   1567 #define DEFINE_BPF_PROG_RUN(stack_size) \
   1568 static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \
   1569 { \
   1570 	u64 stack[stack_size / sizeof(u64)]; \
   1571 	u64 regs[MAX_BPF_EXT_REG]; \
   1572 \
   1573 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
   1574 	ARG1 = (u64) (unsigned long) ctx; \
   1575 	return ___bpf_prog_run(regs, insn, stack); \
   1576 }
   1577 
   1578 #define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size
   1579 #define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \
   1580 static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \
   1581 				      const struct bpf_insn *insn) \
   1582 { \
   1583 	u64 stack[stack_size / sizeof(u64)]; \
   1584 	u64 regs[MAX_BPF_EXT_REG]; \
   1585 \
   1586 	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \
   1587 	BPF_R1 = r1; \
   1588 	BPF_R2 = r2; \
   1589 	BPF_R3 = r3; \
   1590 	BPF_R4 = r4; \
   1591 	BPF_R5 = r5; \
   1592 	return ___bpf_prog_run(regs, insn, stack); \
   1593 }
   1594 
   1595 #define EVAL1(FN, X) FN(X)
   1596 #define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y)
   1597 #define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y)
   1598 #define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y)
   1599 #define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y)
   1600 #define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y)
   1601 
   1602 EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192);
   1603 EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384);
   1604 EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512);
   1605 
   1606 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192);
   1607 EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384);
   1608 EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512);
   1609 
   1610 #define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size),
   1611 
   1612 static unsigned int (*interpreters[])(const void *ctx,
   1613 				      const struct bpf_insn *insn) = {
   1614 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
   1615 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
   1616 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
   1617 };
   1618 #undef PROG_NAME_LIST
   1619 #define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size),
   1620 static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5,
   1621 				  const struct bpf_insn *insn) = {
   1622 EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192)
   1623 EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384)
   1624 EVAL4(PROG_NAME_LIST, 416, 448, 480, 512)
   1625 };
   1626 #undef PROG_NAME_LIST
   1627 
   1628 void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
   1629 {
   1630 	stack_depth = max_t(u32, stack_depth, 1);
   1631 	insn->off = (s16) insn->imm;
   1632 	insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] -
   1633 		__bpf_call_base_args;
   1634 	insn->code = BPF_JMP | BPF_CALL_ARGS;
   1635 }
   1636 
   1637 #else
   1638 static unsigned int __bpf_prog_ret0_warn(const void *ctx,
   1639 					 const struct bpf_insn *insn)
   1640 {
   1641 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
   1642 	 * is not working properly, so warn about it!
   1643 	 */
   1644 	WARN_ON_ONCE(1);
   1645 	return 0;
   1646 }
   1647 #endif
   1648 
   1649 bool bpf_prog_array_compatible(struct bpf_array *array,
   1650 			       const struct bpf_prog *fp)
   1651 {
   1652 	if (fp->kprobe_override)
   1653 		return false;
   1654 
   1655 	if (!array->owner_prog_type) {
   1656 		/* There's no owner yet where we could check for
   1657 		 * compatibility.
   1658 		 */
   1659 		array->owner_prog_type = fp->type;
   1660 		array->owner_jited = fp->jited;
   1661 
   1662 		return true;
   1663 	}
   1664 
   1665 	return array->owner_prog_type == fp->type &&
   1666 	       array->owner_jited == fp->jited;
   1667 }
   1668 
   1669 static int bpf_check_tail_call(const struct bpf_prog *fp)
   1670 {
   1671 	struct bpf_prog_aux *aux = fp->aux;
   1672 	int i;
   1673 
   1674 	for (i = 0; i < aux->used_map_cnt; i++) {
   1675 		struct bpf_map *map = aux->used_maps[i];
   1676 		struct bpf_array *array;
   1677 
   1678 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
   1679 			continue;
   1680 
   1681 		array = container_of(map, struct bpf_array, map);
   1682 		if (!bpf_prog_array_compatible(array, fp))
   1683 			return -EINVAL;
   1684 	}
   1685 
   1686 	return 0;
   1687 }
   1688 
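         /* Pick the interpreter entry point matching the program's stack
          * depth.  With CONFIG_BPF_JIT_ALWAYS_ON the interpreter is compiled
          * out, so a warning stub is installed instead; the JIT is expected
          * to replace it before the program can run.
          */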
   1689 static void bpf_prog_select_func(struct bpf_prog *fp)
   1690 {
   1691 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
   1692 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
   1693 
   1694 	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
   1695 #else
   1696 	fp->bpf_func = __bpf_prog_ret0_warn;
   1697 #endif
   1698 }
   1699 
   1700 /**
   1701  *	bpf_prog_select_runtime - select exec runtime for BPF program
   1702  *	@fp: bpf_prog populated with internal BPF program
   1703  *	@err: pointer to error variable
   1704  *
   1705  * Try to JIT eBPF program, if JIT is not available, use interpreter.
   1706  * The BPF program will be executed via BPF_PROG_RUN() macro.
   1707  */
   1708 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
   1709 {
    1710 	/* In case of BPF-to-BPF calls, the verifier did all the prep
    1711 	 * work with regard to JITing, etc.
    1712 	 */
   1713 	if (fp->bpf_func)
   1714 		goto finalize;
   1715 
   1716 	bpf_prog_select_func(fp);
   1717 
   1718 	/* eBPF JITs can rewrite the program in case constant
   1719 	 * blinding is active. However, in case of error during
   1720 	 * blinding, bpf_int_jit_compile() must always return a
   1721 	 * valid program, which in this case would simply not
   1722 	 * be JITed, but falls back to the interpreter.
   1723 	 */
   1724 	if (!bpf_prog_is_dev_bound(fp->aux)) {
   1725 		*err = bpf_prog_alloc_jited_linfo(fp);
   1726 		if (*err)
   1727 			return fp;
   1728 
   1729 		fp = bpf_int_jit_compile(fp);
   1730 		if (!fp->jited) {
   1731 			bpf_prog_free_jited_linfo(fp);
   1732 #ifdef CONFIG_BPF_JIT_ALWAYS_ON
   1733 			*err = -ENOTSUPP;
   1734 			return fp;
   1735 #endif
   1736 		} else {
   1737 			bpf_prog_free_unused_jited_linfo(fp);
   1738 		}
   1739 	} else {
   1740 		*err = bpf_prog_offload_compile(fp);
   1741 		if (*err)
   1742 			return fp;
   1743 	}
   1744 
   1745 finalize:
   1746 	bpf_prog_lock_ro(fp);
   1747 
    1748 	/* The tail call compatibility check can only be done at this
    1749 	 * late stage, as we need to determine whether we deal with JITed
    1750 	 * or non-JITed program concatenations, and not all eBPF JITs
    1751 	 * might immediately support all features.
    1752 	 */
   1753 	*err = bpf_check_tail_call(fp);
   1754 
   1755 	return fp;
   1756 }
   1757 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
   1758 
   1759 static unsigned int __bpf_prog_ret1(const void *ctx,
   1760 				    const struct bpf_insn *insn)
   1761 {
   1762 	return 1;
   1763 }
   1764 
   1765 static struct bpf_prog_dummy {
   1766 	struct bpf_prog prog;
   1767 } dummy_bpf_prog = {
   1768 	.prog = {
   1769 		.bpf_func = __bpf_prog_ret1,
   1770 	},
   1771 };
   1772 
    1773 /* To avoid allocating an empty bpf_prog_array for cgroups that don't
    1774  * have a bpf program attached, use one global 'empty_prog_array'.
    1775  * It will not be modified by the caller of bpf_prog_array_alloc()
    1776  * (since the caller requested prog_cnt == 0), but that pointer
    1777  * should still be 'freed' via bpf_prog_array_free().
    1778  */
   1779 static struct {
   1780 	struct bpf_prog_array hdr;
   1781 	struct bpf_prog *null_prog;
   1782 } empty_prog_array = {
   1783 	.null_prog = NULL,
   1784 };
   1785 
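         /* Allocate a prog array with room for @prog_cnt items plus a NULL
          * terminator.  A request for zero programs returns the shared
          * 'empty_prog_array' singleton, which bpf_prog_array_free()
          * recognizes and never frees.
          */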
   1786 struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags)
   1787 {
   1788 	if (prog_cnt)
   1789 		return kzalloc(sizeof(struct bpf_prog_array) +
   1790 			       sizeof(struct bpf_prog_array_item) *
   1791 			       (prog_cnt + 1),
   1792 			       flags);
   1793 
   1794 	return &empty_prog_array.hdr;
   1795 }
   1796 
   1797 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs)
   1798 {
   1799 	if (!progs ||
   1800 	    progs == (struct bpf_prog_array __rcu *)&empty_prog_array.hdr)
   1801 		return;
   1802 	kfree_rcu(progs, rcu);
   1803 }
   1804 
   1805 int bpf_prog_array_length(struct bpf_prog_array __rcu *array)
   1806 {
   1807 	struct bpf_prog_array_item *item;
   1808 	u32 cnt = 0;
   1809 
   1810 	rcu_read_lock();
   1811 	item = rcu_dereference(array)->items;
   1812 	for (; item->prog; item++)
   1813 		if (item->prog != &dummy_bpf_prog.prog)
   1814 			cnt++;
   1815 	rcu_read_unlock();
   1816 	return cnt;
   1817 }
   1818 
   1819 
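         /* Copy up to @request_cnt program ids into @prog_ids, skipping dummy
          * entries.  Returns true when entries remain past the copied range,
          * which callers report as -ENOSPC.
          */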
   1820 static bool bpf_prog_array_copy_core(struct bpf_prog_array __rcu *array,
   1821 				     u32 *prog_ids,
   1822 				     u32 request_cnt)
   1823 {
   1824 	struct bpf_prog_array_item *item;
   1825 	int i = 0;
   1826 
   1827 	item = rcu_dereference_check(array, 1)->items;
   1828 	for (; item->prog; item++) {
   1829 		if (item->prog == &dummy_bpf_prog.prog)
   1830 			continue;
   1831 		prog_ids[i] = item->prog->aux->id;
   1832 		if (++i == request_cnt) {
   1833 			item++;
   1834 			break;
   1835 		}
   1836 	}
   1837 
   1838 	return !!(item->prog);
   1839 }
   1840 
   1841 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *array,
   1842 				__u32 __user *prog_ids, u32 cnt)
   1843 {
   1844 	unsigned long err = 0;
   1845 	bool nospc;
   1846 	u32 *ids;
   1847 
    1848 	/* Users of this function are doing:
    1849 	 * cnt = bpf_prog_array_length();
    1850 	 * if (cnt > 0)
    1851 	 *     bpf_prog_array_copy_to_user(..., cnt);
    1852 	 * so the kcalloc below doesn't need an extra cnt > 0 check.
    1853 	 * However, bpf_prog_array_length() releases the RCU lock, and the
    1854 	 * prog array could have been swapped with an empty or larger array,
    1855 	 * so always copy 'cnt' prog_ids to the user.
    1856 	 * In a rare race the user will see zero prog_ids.
    1857 	 */
   1858 	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
   1859 	if (!ids)
   1860 		return -ENOMEM;
   1861 	rcu_read_lock();
   1862 	nospc = bpf_prog_array_copy_core(array, ids, cnt);
   1863 	rcu_read_unlock();
   1864 	err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
   1865 	kfree(ids);
   1866 	if (err)
   1867 		return -EFAULT;
   1868 	if (nospc)
   1869 		return -ENOSPC;
   1870 	return 0;
   1871 }
   1872 
   1873 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *array,
   1874 				struct bpf_prog *old_prog)
   1875 {
   1876 	struct bpf_prog_array_item *item = array->items;
   1877 
   1878 	for (; item->prog; item++)
   1879 		if (item->prog == old_prog) {
   1880 			WRITE_ONCE(item->prog, &dummy_bpf_prog.prog);
   1881 			break;
   1882 		}
   1883 }
   1884 
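         /* Build a new prog array from @old_array with @exclude_prog dropped
          * and @include_prog appended: returns -ENOENT if @exclude_prog was
          * given but not found, -EEXIST if @include_prog is already present,
          * and sets *new_array to NULL when the result would be empty.
          * Illustrative caller sketch (the 'owner' structure is hypothetical,
          * not from this file):
          *
          *	err = bpf_prog_array_copy(old, NULL, new_prog, &new_array);
          *	if (!err)
          *		rcu_assign_pointer(owner->progs, new_array);
          */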
   1885 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
   1886 			struct bpf_prog *exclude_prog,
   1887 			struct bpf_prog *include_prog,
   1888 			struct bpf_prog_array **new_array)
   1889 {
   1890 	int new_prog_cnt, carry_prog_cnt = 0;
   1891 	struct bpf_prog_array_item *existing;
   1892 	struct bpf_prog_array *array;
   1893 	bool found_exclude = false;
   1894 	int new_prog_idx = 0;
   1895 
   1896 	/* Figure out how many existing progs we need to carry over to
   1897 	 * the new array.
   1898 	 */
   1899 	if (old_array) {
   1900 		existing = old_array->items;
   1901 		for (; existing->prog; existing++) {
   1902 			if (existing->prog == exclude_prog) {
   1903 				found_exclude = true;
   1904 				continue;
   1905 			}
   1906 			if (existing->prog != &dummy_bpf_prog.prog)
   1907 				carry_prog_cnt++;
   1908 			if (existing->prog == include_prog)
   1909 				return -EEXIST;
   1910 		}
   1911 	}
   1912 
   1913 	if (exclude_prog && !found_exclude)
   1914 		return -ENOENT;
   1915 
   1916 	/* How many progs (not NULL) will be in the new array? */
   1917 	new_prog_cnt = carry_prog_cnt;
   1918 	if (include_prog)
   1919 		new_prog_cnt += 1;
   1920 
   1921 	/* Do we have any prog (not NULL) in the new array? */
   1922 	if (!new_prog_cnt) {
   1923 		*new_array = NULL;
   1924 		return 0;
   1925 	}
   1926 
   1927 	/* +1 as the end of prog_array is marked with NULL */
   1928 	array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL);
   1929 	if (!array)
   1930 		return -ENOMEM;
   1931 
   1932 	/* Fill in the new prog array */
   1933 	if (carry_prog_cnt) {
   1934 		existing = old_array->items;
   1935 		for (; existing->prog; existing++)
   1936 			if (existing->prog != exclude_prog &&
   1937 			    existing->prog != &dummy_bpf_prog.prog) {
   1938 				array->items[new_prog_idx++].prog =
   1939 					existing->prog;
   1940 			}
   1941 	}
   1942 	if (include_prog)
   1943 		array->items[new_prog_idx++].prog = include_prog;
   1944 	array->items[new_prog_idx].prog = NULL;
   1945 	*new_array = array;
   1946 	return 0;
   1947 }
   1948 
   1949 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
   1950 			     u32 *prog_ids, u32 request_cnt,
   1951 			     u32 *prog_cnt)
   1952 {
   1953 	u32 cnt = 0;
   1954 
   1955 	if (array)
   1956 		cnt = bpf_prog_array_length(array);
   1957 
   1958 	*prog_cnt = cnt;
   1959 
   1960 	/* return early if user requested only program count or nothing to copy */
   1961 	if (!request_cnt || !cnt)
   1962 		return 0;
   1963 
   1964 	/* this function is called under trace/bpf_trace.c: bpf_event_mutex */
   1965 	return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? -ENOSPC
   1966 								     : 0;
   1967 }
   1968 
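         /* Deferred teardown for bpf_prog_free(): freeing the JIT image and
          * the vmalloc'ed program body may sleep, so the actual work runs
          * from a workqueue rather than in the context that drops the last
          * reference.
          */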
   1969 static void bpf_prog_free_deferred(struct work_struct *work)
   1970 {
   1971 	struct bpf_prog_aux *aux;
   1972 	int i;
   1973 
   1974 	aux = container_of(work, struct bpf_prog_aux, work);
   1975 	if (bpf_prog_is_dev_bound(aux))
   1976 		bpf_prog_offload_destroy(aux->prog);
   1977 #ifdef CONFIG_PERF_EVENTS
   1978 	if (aux->prog->has_callchain_buf)
   1979 		put_callchain_buffers();
   1980 #endif
   1981 	for (i = 0; i < aux->func_cnt; i++)
   1982 		bpf_jit_free(aux->func[i]);
   1983 	if (aux->func_cnt) {
   1984 		kfree(aux->func);
   1985 		bpf_prog_unlock_free(aux->prog);
   1986 	} else {
   1987 		bpf_jit_free(aux->prog);
   1988 	}
   1989 }
   1990 
   1991 /* Free internal BPF program */
   1992 void bpf_prog_free(struct bpf_prog *fp)
   1993 {
   1994 	struct bpf_prog_aux *aux = fp->aux;
   1995 
   1996 	INIT_WORK(&aux->work, bpf_prog_free_deferred);
   1997 	schedule_work(&aux->work);
   1998 }
   1999 EXPORT_SYMBOL_GPL(bpf_prog_free);
   2000 
    2001 /* RNG for unprivileged user space with separate state from prandom_u32(). */
   2002 static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
   2003 
   2004 void bpf_user_rnd_init_once(void)
   2005 {
   2006 	prandom_init_once(&bpf_user_rnd_state);
   2007 }
   2008 
   2009 BPF_CALL_0(bpf_user_rnd_u32)
   2010 {
   2011 	/* Should someone ever have the rather unwise idea to use some
   2012 	 * of the registers passed into this function, then note that
   2013 	 * this function is called from native eBPF and classic-to-eBPF
   2014 	 * transformations. Register assignments from both sides are
    2015 	 * different, e.g. classic always sets fn(ctx, A, X) here.
   2016 	 */
   2017 	struct rnd_state *state;
   2018 	u32 res;
   2019 
   2020 	state = &get_cpu_var(bpf_user_rnd_state);
   2021 	res = prandom_u32_state(state);
   2022 	put_cpu_var(bpf_user_rnd_state);
   2023 
   2024 	return res;
   2025 }
   2026 
   2027 /* Weak definitions of helper functions in case we don't have bpf syscall. */
   2028 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
   2029 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
   2030 const struct bpf_func_proto bpf_map_delete_elem_proto __weak;
   2031 const struct bpf_func_proto bpf_map_push_elem_proto __weak;
   2032 const struct bpf_func_proto bpf_map_pop_elem_proto __weak;
   2033 const struct bpf_func_proto bpf_map_peek_elem_proto __weak;
   2034 const struct bpf_func_proto bpf_spin_lock_proto __weak;
   2035 const struct bpf_func_proto bpf_spin_unlock_proto __weak;
   2036 
   2037 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
   2038 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
   2039 const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
   2040 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
   2041 
   2042 const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
   2043 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
   2044 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
   2045 const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
   2046 const struct bpf_func_proto bpf_get_local_storage_proto __weak;
   2047 
   2048 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
   2049 {
   2050 	return NULL;
   2051 }
   2052 
   2053 u64 __weak
   2054 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
   2055 		 void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
   2056 {
   2057 	return -ENOTSUPP;
   2058 }
   2059 EXPORT_SYMBOL_GPL(bpf_event_output);
   2060 
   2061 /* Always built-in helper functions. */
   2062 const struct bpf_func_proto bpf_tail_call_proto = {
   2063 	.func		= NULL,
   2064 	.gpl_only	= false,
   2065 	.ret_type	= RET_VOID,
   2066 	.arg1_type	= ARG_PTR_TO_CTX,
   2067 	.arg2_type	= ARG_CONST_MAP_PTR,
   2068 	.arg3_type	= ARG_ANYTHING,
   2069 };
   2070 
    2071 /* Stub for JITs that only support cBPF. eBPF programs are interpreted.
    2072  * Implementing bpf_int_jit_compile() instead is encouraged, so that
    2073  * eBPF (and implicitly also cBPF) can get JITed!
    2074  */
   2075 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
   2076 {
   2077 	return prog;
   2078 }
   2079 
   2080 /* Stub for JITs that support eBPF. All cBPF code gets transformed into
   2081  * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
   2082  */
   2083 void __weak bpf_jit_compile(struct bpf_prog *prog)
   2084 {
   2085 }
   2086 
   2087 bool __weak bpf_helper_changes_pkt_data(void *func)
   2088 {
   2089 	return false;
   2090 }
   2091 
   2092 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
   2093  * skb_copy_bits(), so provide a weak definition of it for NET-less config.
   2094  */
   2095 int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
   2096 			 int len)
   2097 {
   2098 	return -EFAULT;
   2099 }
   2100 
   2101 DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
   2102 EXPORT_SYMBOL(bpf_stats_enabled_key);
   2103 int sysctl_bpf_stats_enabled __read_mostly;
   2104 
   2105 /* All definitions of tracepoints related to BPF. */
   2106 #define CREATE_TRACE_POINTS
   2107 #include <linux/bpf_trace.h>
   2108 
   2109 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);