// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for BPF interpreter and BPF JIT compiler
 *
 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/sched.h>

/* General test specific settings */
#define MAX_SUBTESTS	3
#define MAX_TESTRUNS	1000
#define MAX_DATA	128
#define MAX_INSNS	512
#define MAX_K		0xffffFFFF

/* A few constants used to init the test 'skb' */
#define SKB_TYPE	3
#define SKB_MARK	0x1234aaaa
#define SKB_HASH	0x1234aaab
#define SKB_QUEUE_MAP	123
#define SKB_VLAN_TCI	0xffff
#define SKB_VLAN_PRESENT	1
#define SKB_DEV_IFINDEX	577
#define SKB_DEV_TYPE	588

/* Redefine REGs to make tests less verbose */
#define R0		BPF_REG_0
#define R1		BPF_REG_1
#define R2		BPF_REG_2
#define R3		BPF_REG_3
#define R4		BPF_REG_4
#define R5		BPF_REG_5
#define R6		BPF_REG_6
#define R7		BPF_REG_7
#define R8		BPF_REG_8
#define R9		BPF_REG_9
#define R10		BPF_REG_10

/* Flags that can be passed to test cases */
#define FLAG_NO_DATA		BIT(0)
#define FLAG_EXPECTED_FAIL	BIT(1)
#define FLAG_SKB_FRAG		BIT(2)
#define FLAG_VERIFIER_ZEXT	BIT(3)
#define FLAG_LARGE_MEM		BIT(4)

enum {
	CLASSIC  = BIT(6),	/* Old BPF instructions only. */
	INTERNAL = BIT(7),	/* Extended instruction set.  */
};

#define TEST_TYPE_MASK		(CLASSIC | INTERNAL)

struct bpf_test {
	const char *descr;
	union {
		struct sock_filter insns[MAX_INSNS];
		struct bpf_insn insns_int[MAX_INSNS];
		struct {
			void *insns;
			unsigned int len;
		} ptr;
	} u;
	__u8 aux;
	__u8 data[MAX_DATA];
	struct {
		int data_size;
		__u32 result;
	} test[MAX_SUBTESTS];
	int (*fill_helper)(struct bpf_test *self);
	int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
	__u8 frag_data[MAX_DATA];
	int stack_depth; /* for eBPF only, since tests don't call the verifier */
	int nr_testruns; /* Custom run count, defaults to MAX_TESTRUNS if 0 */
};

/* Large test cases need a separate allocation and fill handler. */

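/* Maximum-length classic program of plain returns, each with a different value. */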
static int bpf_fill_maxinsns1(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	__u32 k = ~0;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len; i++, k--)
		insn[i] = __BPF_STMT(BPF_RET | BPF_K, k);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

static int bpf_fill_maxinsns2(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len; i++)
		insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

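/* Sums pseudo-random constants from a fixed seed and returns the result. */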
static int bpf_fill_maxinsns3(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	struct rnd_state rnd;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	prandom_seed_state(&rnd, 3141592653589793238ULL);

	for (i = 0; i < len - 1; i++) {
		__u32 k = prandom_u32_state(&rnd);

		insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k);
	}

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

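/* One instruction past the classic BPF_MAXINSNS limit. */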
static int bpf_fill_maxinsns4(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS + 1;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len; i++)
		insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

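/* Jumps over a body of identical returns straight to the final return. */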
static int bpf_fill_maxinsns5(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);

	for (i = 1; i < len - 1; i++)
		insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

static int bpf_fill_maxinsns6(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 1; i++)
		insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
				     SKF_AD_VLAN_TAG_PRESENT);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

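/*
 * Repeated SKF_AD_CPU ancillary loads; the last two loads are subtracted,
 * so the program returns 0 unless it migrates to another CPU in between.
 */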
static int bpf_fill_maxinsns7(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 4; i++)
		insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
				     SKF_AD_CPU);

	insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0);
	insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF +
				   SKF_AD_CPU);
	insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0);
	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

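/*
 * Chain of conditional jumps whose never-taken branches all target the
 * final return, exercising large jump offsets in the JIT image.
 */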
static int bpf_fill_maxinsns8(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i, jmp_off = len - 3;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff);

	for (i = 1; i < len - 1; i++)
		insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

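/*
 * eBPF: a long forward jump to the last instruction, which jumps backward
 * to the MOV/EXIT pair near the top that produces the return value.
 */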
static int bpf_fill_maxinsns9(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2);
	insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab);
	insn[2] = BPF_EXIT_INSN();

	for (i = 3; i < len - 2; i++)
		insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe);

	insn[len - 2] = BPF_EXIT_INSN();
	insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1));

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

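/*
 * eBPF: unconditional jumps that ping-pong between the two halves of the
 * program before reaching the tail MOV/EXIT.
 */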
static int bpf_fill_maxinsns10(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS, hlen = len - 2;
	struct bpf_insn *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < hlen / 2; i++)
		insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i);
	for (i = hlen - 1; i > hlen / 2; i--)
		insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i);

	insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1);
	insn[hlen]     = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac);
	insn[hlen + 1] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

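/*
 * Emits groups of 'plen' forward jumps in which every jump targets the
 * first instruction of the next group, ending in a single return.
 */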
static int __bpf_fill_ja(struct bpf_test *self, unsigned int len,
			 unsigned int plen)
{
	struct sock_filter *insn;
	unsigned int rlen;
	int i, j;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	rlen = (len % plen) - 1;

	for (i = 0; i + plen < len; i += plen)
		for (j = 0; j < plen; j++)
			insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA,
						 plen - 1 - j, 0, 0);
	for (j = 0; j < rlen; j++)
		insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j,
					 0, 0);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

static int bpf_fill_maxinsns11(struct bpf_test *self)
{
	/* Hits 70 passes on x86_64 and triggers NOPs padding. */
	return __bpf_fill_ja(self, BPF_MAXINSNS, 68);
}

static int bpf_fill_maxinsns12(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0);

	for (i = 1; i < len - 1; i++)
		insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

static int bpf_fill_maxinsns13(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 3; i++)
		insn[i] = __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0);

	insn[len - 3] = __BPF_STMT(BPF_LD | BPF_IMM, 0xabababab);
	insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0);
	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

static int bpf_fill_ja(struct bpf_test *self)
{
	/* Hits exactly 11 passes on x86_64 JIT. */
	return __bpf_fill_ja(self, 12, 9);
}

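/* Alternates packet byte loads with SKF_AD_CPU ancillary loads. */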
static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
{
	unsigned int len = BPF_MAXINSNS;
	struct sock_filter *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	for (i = 0; i < len - 1; i += 2) {
		insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
		insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
					 SKF_AD_OFF + SKF_AD_CPU);
	}

	insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;

	return 0;
}

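/*
 * Stores 42 on the stack, atomically adds 1 to it repeatedly, then loads
 * and returns the result; 'size' selects word or double-word accesses.
 */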
static int __bpf_fill_stxdw(struct bpf_test *self, int size)
{
	unsigned int len = BPF_MAXINSNS;
	struct bpf_insn *insn;
	int i;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[0] = BPF_ALU32_IMM(BPF_MOV, R0, 1);
	insn[1] = BPF_ST_MEM(size, R10, -40, 42);

	for (i = 2; i < len - 2; i++)
		insn[i] = BPF_STX_XADD(size, R10, R0, -40);

	insn[len - 2] = BPF_LDX_MEM(size, R0, R10, -40);
	insn[len - 1] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	self->stack_depth = 40;

	return 0;
}

static int bpf_fill_stxw(struct bpf_test *self)
{
	return __bpf_fill_stxdw(self, BPF_W);
}

static int bpf_fill_stxdw(struct bpf_test *self)
{
	return __bpf_fill_stxdw(self, BPF_DW);
}

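/* Emits a two-instruction BPF_LD_IMM64 and returns the instruction count. */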
static int __bpf_ld_imm64(struct bpf_insn insns[2], u8 reg, s64 imm64)
{
	struct bpf_insn tmp[] = {BPF_LD_IMM64(reg, imm64)};

	memcpy(insns, tmp, sizeof(tmp));
	return 2;
}

/*
 * Branch conversion tests. Complex operations can expand to a lot
 * of instructions when JITed. This in turn may cause jump offsets
 * to overflow the field size of the native instruction, triggering
 * a branch conversion mechanism in some JITs.
 */
static int __bpf_fill_max_jmp(struct bpf_test *self, int jmp, int imm)
{
	struct bpf_insn *insns;
	int len = S16_MAX + 5;
	int i;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	i = __bpf_ld_imm64(insns, R1, 0x0123456789abcdefULL);
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insns[i++] = BPF_JMP_IMM(jmp, R0, imm, S16_MAX);
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 2);
	insns[i++] = BPF_EXIT_INSN();

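	/*
	 * Pad the body with ALU operations that may expand to several
	 * native instructions when JITed, pushing the branch target far away.
	 */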
	while (i < len - 1) {
		static const int ops[] = {
			BPF_LSH, BPF_RSH, BPF_ARSH, BPF_ADD,
			BPF_SUB, BPF_MUL, BPF_DIV, BPF_MOD,
		};
		int op = ops[(i >> 1) % ARRAY_SIZE(ops)];

		if (i & 1)
			insns[i++] = BPF_ALU32_REG(op, R0, R1);
		else
			insns[i++] = BPF_ALU64_REG(op, R0, R1);
	}

	insns[i++] = BPF_EXIT_INSN();
	self->u.ptr.insns = insns;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}

/* Branch taken by runtime decision */
static int bpf_fill_max_jmp_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JEQ, 1);
}

/* Branch not taken by runtime decision */
static int bpf_fill_max_jmp_not_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JEQ, 0);
}

/* Branch always taken, known at JIT time */
static int bpf_fill_max_jmp_always_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JGE, 0);
}

/* Branch never taken, known at JIT time */
static int bpf_fill_max_jmp_never_taken(struct bpf_test *self)
{
	return __bpf_fill_max_jmp(self, BPF_JLT, 0);
}

/* ALU result computation used in tests */
static bool __bpf_alu_result(u64 *res, u64 v1, u64 v2, u8 op)
{
	*res = 0;
	switch (op) {
	case BPF_MOV:
		*res = v2;
		break;
	case BPF_AND:
		*res = v1 & v2;
		break;
	case BPF_OR:
		*res = v1 | v2;
		break;
	case BPF_XOR:
		*res = v1 ^ v2;
		break;
	case BPF_LSH:
		*res = v1 << v2;
		break;
	case BPF_RSH:
		*res = v1 >> v2;
		break;
	case BPF_ARSH:
		*res = v1 >> v2;
		if (v2 > 0 && v1 > S64_MAX)
			*res |= ~0ULL << (64 - v2);
		break;
	case BPF_ADD:
		*res = v1 + v2;
		break;
	case BPF_SUB:
		*res = v1 - v2;
		break;
	case BPF_MUL:
		*res = v1 * v2;
		break;
	case BPF_DIV:
		if (v2 == 0)
			return false;
		*res = div64_u64(v1, v2);
		break;
	case BPF_MOD:
		if (v2 == 0)
			return false;
		div64_u64_rem(v1, v2, res);
		break;
	}
	return true;
}

/* Test an ALU shift operation for all valid shift values */
static int __bpf_fill_alu_shift(struct bpf_test *self, u8 op,
				u8 mode, bool alu32)
{
	static const s64 regs[] = {
		0x0123456789abcdefLL, /* dword > 0, word < 0 */
		0xfedcba9876543210LL, /* dword < 0, word > 0 */
		0xfedcba0198765432LL, /* dword < 0, word < 0 */
		0x0123458967abcdefLL, /* dword > 0, word > 0 */
	};
	int bits = alu32 ? 32 : 64;
	int len = (2 + 7 * bits) * ARRAY_SIZE(regs) + 3;
	struct bpf_insn *insn;
	int imm, k;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

	for (k = 0; k < ARRAY_SIZE(regs); k++) {
		s64 reg = regs[k];

		i += __bpf_ld_imm64(&insn[i], R3, reg);

		for (imm = 0; imm < bits; imm++) {
			u64 val;

			/* Perform operation */
			insn[i++] = BPF_ALU64_REG(BPF_MOV, R1, R3);
			insn[i++] = BPF_ALU64_IMM(BPF_MOV, R2, imm);
			if (alu32) {
				if (mode == BPF_K)
					insn[i++] = BPF_ALU32_IMM(op, R1, imm);
				else
					insn[i++] = BPF_ALU32_REG(op, R1, R2);

				if (op == BPF_ARSH)
					reg = (s32)reg;
				else
					reg = (u32)reg;
				__bpf_alu_result(&val, reg, imm, op);
				val = (u32)val;
			} else {
				if (mode == BPF_K)
					insn[i++] = BPF_ALU64_IMM(op, R1, imm);
				else
					insn[i++] = BPF_ALU64_REG(op, R1, R2);
				__bpf_alu_result(&val, reg, imm, op);
			}

			/*
			 * When debugging a JIT that fails this test, one
			 * can write the immediate value to R0 here to find
			 * out which operand values fail.
			 */

			/* Load reference and check the result */
			i += __bpf_ld_imm64(&insn[i], R4, val);
			insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R4, 1);
			insn[i++] = BPF_EXIT_INSN();
		}
	}

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}

static int bpf_fill_alu64_lsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, false);
}

static int bpf_fill_alu64_rsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, false);
}

static int bpf_fill_alu64_arsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, false);
}

static int bpf_fill_alu64_lsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, false);
}

static int bpf_fill_alu64_rsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, false);
}

static int bpf_fill_alu64_arsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, false);
}

static int bpf_fill_alu32_lsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_K, true);
}

static int bpf_fill_alu32_rsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_K, true);
}

static int bpf_fill_alu32_arsh_imm(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_K, true);
}

static int bpf_fill_alu32_lsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_LSH, BPF_X, true);
}

static int bpf_fill_alu32_rsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_RSH, BPF_X, true);
}

static int bpf_fill_alu32_arsh_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift(self, BPF_ARSH, BPF_X, true);
}

/*
 * Test an ALU register shift operation for all valid shift values
 * for the case when the source and destination are the same.
 */
static int __bpf_fill_alu_shift_same_reg(struct bpf_test *self, u8 op,
					 bool alu32)
{
	int bits = alu32 ? 32 : 64;
	int len = 3 + 6 * bits;
	struct bpf_insn *insn;
	int i = 0;
	u64 val;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

	for (val = 0; val < bits; val++) {
		u64 res;

		/* Perform operation */
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, R1, val);
		if (alu32)
			insn[i++] = BPF_ALU32_REG(op, R1, R1);
		else
			insn[i++] = BPF_ALU64_REG(op, R1, R1);

		/* Compute the reference result */
		__bpf_alu_result(&res, val, val, op);
		if (alu32)
			res = (u32)res;
		i += __bpf_ld_imm64(&insn[i], R2, res);

		/* Check the actual result */
		insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
		insn[i++] = BPF_EXIT_INSN();
	}

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}

static int bpf_fill_alu64_lsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, false);
}

static int bpf_fill_alu64_rsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, false);
}

static int bpf_fill_alu64_arsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, false);
}

static int bpf_fill_alu32_lsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_LSH, true);
}

static int bpf_fill_alu32_rsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_RSH, true);
}

static int bpf_fill_alu32_arsh_same_reg(struct bpf_test *self)
{
	return __bpf_fill_alu_shift_same_reg(self, BPF_ARSH, true);
}

/*
 * Common operand pattern generator for exhaustive power-of-two magnitude
 * tests. The block size parameters can be adjusted to increase/reduce the
 * number of combinations tested and thereby execution speed and memory
 * footprint.
 */

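/* Magnitude operand helper: returns sign * 2^msb + delta */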
static inline s64 value(int msb, int delta, int sign)
{
	return sign * (1LL << msb) + delta;
}

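/*
 * The emit callback is first called with insns == NULL to report the
 * maximum number of instructions it may emit per operand pair, which is
 * used to size the buffer, and then once per (dst, src) pair to emit the
 * actual test snippet.
 */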
static int __bpf_fill_pattern(struct bpf_test *self, void *arg,
			      int dbits, int sbits, int block1, int block2,
			      int (*emit)(struct bpf_test*, void*,
					  struct bpf_insn*, s64, s64))
{
	static const int sgn[][2] = {{1, 1}, {1, -1}, {-1, 1}, {-1, -1}};
	struct bpf_insn *insns;
	int di, si, bt, db, sb;
	int count, len, k;
	int extra = 1 + 2;
	int i = 0;

	/* Total number of iterations for the two patterns */
	count = (dbits - 1) * (sbits - 1) * block1 * block1 * ARRAY_SIZE(sgn);
	count += (max(dbits, sbits) - 1) * block2 * block2 * ARRAY_SIZE(sgn);

	/* Compute the maximum number of insns and allocate the buffer */
	len = extra + count * (*emit)(self, arg, NULL, 0, 0);
	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Add head instruction(s) */
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

	/*
	 * Pattern 1: all combinations of power-of-two magnitudes and sign,
	 * and with a block of contiguous values around each magnitude.
	 */
	for (di = 0; di < dbits - 1; di++)                 /* Dst magnitudes */
		for (si = 0; si < sbits - 1; si++)         /* Src magnitudes */
			for (k = 0; k < ARRAY_SIZE(sgn); k++) /* Sign combos */
				for (db = -(block1 / 2);
				     db < (block1 + 1) / 2; db++)
					for (sb = -(block1 / 2);
					     sb < (block1 + 1) / 2; sb++) {
						s64 dst, src;

						dst = value(di, db, sgn[k][0]);
						src = value(si, sb, sgn[k][1]);
						i += (*emit)(self, arg,
							     &insns[i],
							     dst, src);
					}
	/*
	 * Pattern 2: all combinations for a larger block of values
	 * for each power-of-two magnitude and sign, where the magnitude is
	 * the same for both operands.
	 */
	for (bt = 0; bt < max(dbits, sbits) - 1; bt++)        /* Magnitude   */
		for (k = 0; k < ARRAY_SIZE(sgn); k++)         /* Sign combos */
			for (db = -(block2 / 2); db < (block2 + 1) / 2; db++)
				for (sb = -(block2 / 2);
				     sb < (block2 + 1) / 2; sb++) {
					s64 dst, src;

					dst = value(bt % dbits, db, sgn[k][0]);
					src = value(bt % sbits, sb, sgn[k][1]);
					i += (*emit)(self, arg, &insns[i],
						     dst, src);
				}

	/* Append tail instructions */
	insns[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insns[i++] = BPF_EXIT_INSN();
	BUG_ON(i > len);

	self->u.ptr.insns = insns;
	self->u.ptr.len = i;

	return 0;
}

/*
 * Block size parameters used in the pattern tests below. Tune as needed to
 * increase/reduce the number of combinations tested; see the examples below.
 *        block   values per operand MSB
 * ----------------------------------------
 *           0     none
 *           1     (1 << MSB)
 *           2     (1 << MSB) + [-1, 0]
 *           3     (1 << MSB) + [-1, 0, 1]
 */
#define PATTERN_BLOCK1 1
#define PATTERN_BLOCK2 5

/* Number of test runs for a pattern test */
#define NR_PATTERN_RUNS 1

/*
 * Exhaustive tests of ALU operations for all combinations of power-of-two
 * magnitudes of the operands, both for positive and negative values. The
 * test is designed to verify e.g. the ALU and ALU64 operations for JITs that
 * emit different code depending on the magnitude of the immediate value.
 */
static int __bpf_emit_alu64_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 7;

	if (__bpf_alu_result(&res, dst, (s32)imm, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R3, res);
		insns[i++] = BPF_ALU64_IMM(op, R1, imm);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

static int __bpf_emit_alu32_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 7;

	if (__bpf_alu_result(&res, (u32)dst, (u32)imm, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
		insns[i++] = BPF_ALU32_IMM(op, R1, imm);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

static int __bpf_emit_alu64_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 9;

	if (__bpf_alu_result(&res, dst, src, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R2, src);
		i += __bpf_ld_imm64(&insns[i], R3, res);
		insns[i++] = BPF_ALU64_REG(op, R1, R2);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

static int __bpf_emit_alu32_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	int i = 0;
	u64 res;

	if (!insns)
		return 9;

	if (__bpf_alu_result(&res, (u32)dst, (u32)src, op)) {
		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R2, src);
		i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
		insns[i++] = BPF_ALU32_REG(op, R1, R2);
		insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
		insns[i++] = BPF_EXIT_INSN();
	}

	return i;
}

static int __bpf_fill_alu64_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu64_imm);
}

static int __bpf_fill_alu32_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu32_imm);
}

static int __bpf_fill_alu64_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu64_reg);
}

static int __bpf_fill_alu32_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_alu32_reg);
}

/* ALU64 immediate operations */
static int bpf_fill_alu64_mov_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_MOV);
}

static int bpf_fill_alu64_and_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_AND);
}

static int bpf_fill_alu64_or_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_OR);
}

static int bpf_fill_alu64_xor_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_XOR);
}

static int bpf_fill_alu64_add_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_ADD);
}

static int bpf_fill_alu64_sub_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_SUB);
}

static int bpf_fill_alu64_mul_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_MUL);
}

static int bpf_fill_alu64_div_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_DIV);
}

static int bpf_fill_alu64_mod_imm(struct bpf_test *self)
{
	return __bpf_fill_alu64_imm(self, BPF_MOD);
}

/* ALU32 immediate operations */
static int bpf_fill_alu32_mov_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_MOV);
}

static int bpf_fill_alu32_and_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_AND);
}

static int bpf_fill_alu32_or_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_OR);
}

static int bpf_fill_alu32_xor_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_XOR);
}

static int bpf_fill_alu32_add_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_ADD);
}

static int bpf_fill_alu32_sub_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_SUB);
}

static int bpf_fill_alu32_mul_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_MUL);
}

static int bpf_fill_alu32_div_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_DIV);
}

static int bpf_fill_alu32_mod_imm(struct bpf_test *self)
{
	return __bpf_fill_alu32_imm(self, BPF_MOD);
}

/* ALU64 register operations */
static int bpf_fill_alu64_mov_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_MOV);
}

static int bpf_fill_alu64_and_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_AND);
}

static int bpf_fill_alu64_or_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_OR);
}

static int bpf_fill_alu64_xor_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_XOR);
}

static int bpf_fill_alu64_add_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_ADD);
}

static int bpf_fill_alu64_sub_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_SUB);
}

static int bpf_fill_alu64_mul_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_MUL);
}

static int bpf_fill_alu64_div_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_DIV);
}

static int bpf_fill_alu64_mod_reg(struct bpf_test *self)
{
	return __bpf_fill_alu64_reg(self, BPF_MOD);
}

/* ALU32 register operations */
static int bpf_fill_alu32_mov_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_MOV);
}

static int bpf_fill_alu32_and_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_AND);
}

static int bpf_fill_alu32_or_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_OR);
}

static int bpf_fill_alu32_xor_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_XOR);
}

static int bpf_fill_alu32_add_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_ADD);
}

static int bpf_fill_alu32_sub_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_SUB);
}

static int bpf_fill_alu32_mul_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_MUL);
}

static int bpf_fill_alu32_div_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_DIV);
}

static int bpf_fill_alu32_mod_reg(struct bpf_test *self)
{
	return __bpf_fill_alu32_reg(self, BPF_MOD);
}

/*
 * Test JITs that implement complex ALU operations as function
 * calls, and must re-arrange operands for argument passing.
 */
static int __bpf_fill_alu_imm_regs(struct bpf_test *self, u8 op, bool alu32)
{
	int len = 2 + 10 * 10;
	struct bpf_insn *insns;
	u64 dst, res;
	int i = 0;
	u32 imm;
	int rd;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Operand and result values according to operation */
	if (alu32)
		dst = 0x76543210U;
	else
		dst = 0x7edcba9876543210ULL;
	imm = 0x01234567U;

	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
		imm &= 31;

	__bpf_alu_result(&res, dst, imm, op);

	if (alu32)
		res = (u32)res;

	/* Check all operand registers */
	for (rd = R0; rd <= R9; rd++) {
		i += __bpf_ld_imm64(&insns[i], rd, dst);

		if (alu32)
			insns[i++] = BPF_ALU32_IMM(op, rd, imm);
		else
			insns[i++] = BPF_ALU64_IMM(op, rd, imm);

		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res, 2);
		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
		insns[i++] = BPF_EXIT_INSN();

		insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
		insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, res >> 32, 2);
		insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
		insns[i++] = BPF_EXIT_INSN();
	}

	insns[i++] = BPF_MOV64_IMM(R0, 1);
	insns[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insns;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}

/* ALU64 K registers */
static int bpf_fill_alu64_mov_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOV, false);
}

static int bpf_fill_alu64_and_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_AND, false);
}

static int bpf_fill_alu64_or_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_OR, false);
}

static int bpf_fill_alu64_xor_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_XOR, false);
}

static int bpf_fill_alu64_lsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_LSH, false);
}

static int bpf_fill_alu64_rsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_RSH, false);
}

static int bpf_fill_alu64_arsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, false);
}

static int bpf_fill_alu64_add_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ADD, false);
}

static int bpf_fill_alu64_sub_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_SUB, false);
}

static int bpf_fill_alu64_mul_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MUL, false);
}

static int bpf_fill_alu64_div_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_DIV, false);
}

static int bpf_fill_alu64_mod_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOD, false);
}

/* ALU32 K registers */
static int bpf_fill_alu32_mov_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOV, true);
}

static int bpf_fill_alu32_and_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_AND, true);
}

static int bpf_fill_alu32_or_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_OR, true);
}

static int bpf_fill_alu32_xor_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_XOR, true);
}

static int bpf_fill_alu32_lsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_LSH, true);
}

static int bpf_fill_alu32_rsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_RSH, true);
}

static int bpf_fill_alu32_arsh_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ARSH, true);
}

static int bpf_fill_alu32_add_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_ADD, true);
}

static int bpf_fill_alu32_sub_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_SUB, true);
}

static int bpf_fill_alu32_mul_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MUL, true);
}

static int bpf_fill_alu32_div_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_DIV, true);
}

static int bpf_fill_alu32_mod_imm_regs(struct bpf_test *self)
{
	return __bpf_fill_alu_imm_regs(self, BPF_MOD, true);
}

/*
 * Test JITs that implement complex ALU operations as function
 * calls, and must re-arrange operands for argument passing.
 */
static int __bpf_fill_alu_reg_pairs(struct bpf_test *self, u8 op, bool alu32)
{
	int len = 2 + 10 * 10 * 12;
	u64 dst, src, res, same;
	struct bpf_insn *insns;
	int rd, rs;
	int i = 0;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Operand and result values according to operation */
	if (alu32) {
		dst = 0x76543210U;
		src = 0x01234567U;
	} else {
		dst = 0x7edcba9876543210ULL;
		src = 0x0123456789abcdefULL;
	}

	if (op == BPF_LSH || op == BPF_RSH || op == BPF_ARSH)
		src &= 31;

	__bpf_alu_result(&res, dst, src, op);
	__bpf_alu_result(&same, src, src, op);

	if (alu32) {
		res = (u32)res;
		same = (u32)same;
	}

	/* Check all combinations of operand registers */
	for (rd = R0; rd <= R9; rd++) {
		for (rs = R0; rs <= R9; rs++) {
			u64 val = rd == rs ? same : res;

			i += __bpf_ld_imm64(&insns[i], rd, dst);
			i += __bpf_ld_imm64(&insns[i], rs, src);

			if (alu32)
				insns[i++] = BPF_ALU32_REG(op, rd, rs);
			else
				insns[i++] = BPF_ALU64_REG(op, rd, rs);

			insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val, 2);
			insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
			insns[i++] = BPF_EXIT_INSN();

			insns[i++] = BPF_ALU64_IMM(BPF_RSH, rd, 32);
			insns[i++] = BPF_JMP32_IMM(BPF_JEQ, rd, val >> 32, 2);
			insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
			insns[i++] = BPF_EXIT_INSN();
		}
	}

	insns[i++] = BPF_MOV64_IMM(R0, 1);
	insns[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insns;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}

/* ALU64 X register combinations */
static int bpf_fill_alu64_mov_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOV, false);
}

static int bpf_fill_alu64_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_AND, false);
}

static int bpf_fill_alu64_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_OR, false);
}

static int bpf_fill_alu64_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_XOR, false);
}

static int bpf_fill_alu64_lsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_LSH, false);
}

static int bpf_fill_alu64_rsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_RSH, false);
}

static int bpf_fill_alu64_arsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, false);
}

static int bpf_fill_alu64_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ADD, false);
}

static int bpf_fill_alu64_sub_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_SUB, false);
}

static int bpf_fill_alu64_mul_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MUL, false);
}

static int bpf_fill_alu64_div_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_DIV, false);
}

static int bpf_fill_alu64_mod_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOD, false);
}

/* ALU32 X register combinations */
static int bpf_fill_alu32_mov_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOV, true);
}

static int bpf_fill_alu32_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_AND, true);
}

static int bpf_fill_alu32_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_OR, true);
}

static int bpf_fill_alu32_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_XOR, true);
}

static int bpf_fill_alu32_lsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_LSH, true);
}

static int bpf_fill_alu32_rsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_RSH, true);
}

static int bpf_fill_alu32_arsh_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ARSH, true);
}

static int bpf_fill_alu32_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_ADD, true);
}

static int bpf_fill_alu32_sub_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_SUB, true);
}

static int bpf_fill_alu32_mul_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MUL, true);
}

static int bpf_fill_alu32_div_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_DIV, true);
}

static int bpf_fill_alu32_mod_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_alu_reg_pairs(self, BPF_MOD, true);
}

/*
 * Exhaustive tests of atomic operations for all power-of-two operand
 * magnitudes, both for positive and negative values.
 */

static int __bpf_emit_atomic64(struct bpf_test *self, void *arg,
			       struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	u64 keep, fetch, res;
	int i = 0;

	if (!insns)
		return 21;

	switch (op) {
	case BPF_XCHG:
		res = src;
		break;
	default:
		__bpf_alu_result(&res, dst, src, BPF_OP(op));
	}

	keep = 0x0123456789abcdefULL;
	if (op & BPF_FETCH)
		fetch = dst;
	else
		fetch = src;

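	/*
	 * R1 = memory operand, R2 = source operand, R3 = expected memory
	 * result, R4 = expected value of R2 afterwards, R0/R5 = canary pair
	 * to detect unexpected clobbering.
	 */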
	i += __bpf_ld_imm64(&insns[i], R0, keep);
	i += __bpf_ld_imm64(&insns[i], R1, dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);
	i += __bpf_ld_imm64(&insns[i], R3, res);
	i += __bpf_ld_imm64(&insns[i], R4, fetch);
	i += __bpf_ld_imm64(&insns[i], R5, keep);

	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
	insns[i++] = BPF_ATOMIC_OP(BPF_DW, op, R10, R2, -8);
	insns[i++] = BPF_LDX_MEM(BPF_DW, R1, R10, -8);

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}

static int __bpf_emit_atomic32(struct bpf_test *self, void *arg,
			       struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;
	u64 keep, fetch, res;
	int i = 0;

	if (!insns)
		return 21;

	switch (op) {
	case BPF_XCHG:
		res = src;
		break;
	default:
		__bpf_alu_result(&res, (u32)dst, (u32)src, BPF_OP(op));
	}

	keep = 0x0123456789abcdefULL;
	if (op & BPF_FETCH)
		fetch = (u32)dst;
	else
		fetch = src;

	i += __bpf_ld_imm64(&insns[i], R0, keep);
	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);
	i += __bpf_ld_imm64(&insns[i], R3, (u32)res);
	i += __bpf_ld_imm64(&insns[i], R4, fetch);
	i += __bpf_ld_imm64(&insns[i], R5, keep);

	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
	insns[i++] = BPF_ATOMIC_OP(BPF_W, op, R10, R2, -4);
	insns[i++] = BPF_LDX_MEM(BPF_W, R1, R10, -4);

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 1);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R4, 1);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R5, 1);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}

static int __bpf_emit_cmpxchg64(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int i = 0;

	if (!insns)
		return 23;

	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
	i += __bpf_ld_imm64(&insns[i], R1, dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);

	/* Result unsuccessful */
	insns[i++] = BPF_STX_MEM(BPF_DW, R10, R1, -8);
	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R1, R3, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* Result successful */
	insns[i++] = BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -8);
	insns[i++] = BPF_LDX_MEM(BPF_DW, R3, R10, -8);

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R2, R3, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
	insns[i++] = BPF_MOV64_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}

static int __bpf_emit_cmpxchg32(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int i = 0;

	if (!insns)
		return 27;

	i += __bpf_ld_imm64(&insns[i], R0, ~dst);
	i += __bpf_ld_imm64(&insns[i], R1, (u32)dst);
	i += __bpf_ld_imm64(&insns[i], R2, src);

	/* Result unsuccessful */
	insns[i++] = BPF_STX_MEM(BPF_W, R10, R1, -4);
	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);

	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R1, R3, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R3, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	/* Result successful */
	i += __bpf_ld_imm64(&insns[i], R0, dst);
	insns[i++] = BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R2, -4);
	insns[i++] = BPF_ZEXT_REG(R0); /* Zext always inserted by verifier */
	insns[i++] = BPF_LDX_MEM(BPF_W, R3, R10, -4);

	insns[i++] = BPF_JMP32_REG(BPF_JEQ, R2, R3, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	insns[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
	insns[i++] = BPF_MOV32_IMM(R0, __LINE__);
	insns[i++] = BPF_EXIT_INSN();

	return i;
}

static int __bpf_fill_atomic64(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  0, PATTERN_BLOCK2,
				  &__bpf_emit_atomic64);
}

static int __bpf_fill_atomic32(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  0, PATTERN_BLOCK2,
				  &__bpf_emit_atomic32);
}

/* 64-bit atomic operations */
static int bpf_fill_atomic64_add(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_ADD);
}

static int bpf_fill_atomic64_and(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_AND);
}

static int bpf_fill_atomic64_or(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_OR);
}

static int bpf_fill_atomic64_xor(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_XOR);
}

static int bpf_fill_atomic64_add_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic64_and_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic64_or_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic64_xor_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic64_xchg(struct bpf_test *self)
{
	return __bpf_fill_atomic64(self, BPF_XCHG);
}

static int bpf_fill_cmpxchg64(struct bpf_test *self)
{
	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
				  &__bpf_emit_cmpxchg64);
}

/* 32-bit atomic operations */
static int bpf_fill_atomic32_add(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_ADD);
}

static int bpf_fill_atomic32_and(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_AND);
}

static int bpf_fill_atomic32_or(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_OR);
}

static int bpf_fill_atomic32_xor(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_XOR);
}

static int bpf_fill_atomic32_add_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic32_and_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic32_or_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic32_xor_fetch(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic32_xchg(struct bpf_test *self)
{
	return __bpf_fill_atomic32(self, BPF_XCHG);
}

static int bpf_fill_cmpxchg32(struct bpf_test *self)
{
	return __bpf_fill_pattern(self, NULL, 64, 64, 0, PATTERN_BLOCK2,
				  &__bpf_emit_cmpxchg32);
}

/*
 * Test JITs that implement ATOMIC operations as function calls or
 * other primitives, and must re-arrange operands for argument passing.
 */
static int __bpf_fill_atomic_reg_pairs(struct bpf_test *self, u8 width, u8 op)
{
	struct bpf_insn *insn;
	int len = 2 + 34 * 10 * 10;
	u64 mem, upd, res;
	int rd, rs, i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	/* Operand and memory values */
	if (width == BPF_DW) {
		mem = 0x0123456789abcdefULL;
		upd = 0xfedcba9876543210ULL;
	} else { /* BPF_W */
		mem = 0x01234567U;
		upd = 0x76543210U;
	}

	/* Memory updated according to operation */
	switch (op) {
	case BPF_XCHG:
		res = upd;
		break;
	case BPF_CMPXCHG:
		res = mem;
		break;
	default:
		__bpf_alu_result(&res, mem, upd, BPF_OP(op));
	}

	/* Test all operand registers */
	for (rd = R0; rd <= R9; rd++) {
		for (rs = R0; rs <= R9; rs++) {
			u64 cmp, src;

			/* Initialize value in memory */
			i += __bpf_ld_imm64(&insn[i], R0, mem);
			insn[i++] = BPF_STX_MEM(width, R10, R0, -8);

			/* Initialize registers in order */
			i += __bpf_ld_imm64(&insn[i], R0, ~mem);
			i += __bpf_ld_imm64(&insn[i], rs, upd);
			insn[i++] = BPF_MOV64_REG(rd, R10);

			/* Perform atomic operation */
			insn[i++] = BPF_ATOMIC_OP(width, op, rd, rs, -8);
			if (op == BPF_CMPXCHG && width == BPF_W)
				insn[i++] = BPF_ZEXT_REG(R0);

			/* Check R0 register value */
			if (op == BPF_CMPXCHG)
				cmp = mem;  /* Expect value from memory */
			else if (R0 == rd || R0 == rs)
				cmp = 0;    /* Aliased, checked below */
			else
				cmp = ~mem; /* Expect value to be preserved */
			if (cmp) {
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
							   (u32)cmp, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
				insn[i++] = BPF_ALU64_IMM(BPF_RSH, R0, 32);
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, R0,
							   cmp >> 32, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
			}

			/* Check source register value */
			if (rs == R0 && op == BPF_CMPXCHG)
				src = 0;   /* Aliased with R0, checked above */
			else if (rs == rd && (op == BPF_CMPXCHG ||
					      !(op & BPF_FETCH)))
				src = 0;   /* Aliased with rd, checked below */
			else if (op == BPF_CMPXCHG)
				src = upd; /* Expect value to be preserved */
			else if (op & BPF_FETCH)
				src = mem; /* Expect fetched value from mem */
			else /* no fetch */
				src = upd; /* Expect value to be preserved */
			if (src) {
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
							   (u32)src, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
				insn[i++] = BPF_ALU64_IMM(BPF_RSH, rs, 32);
				insn[i++] = BPF_JMP32_IMM(BPF_JEQ, rs,
							   src >> 32, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
			}

			/* Check destination register value */
			if (!(rd == R0 && op == BPF_CMPXCHG) &&
			    !(rd == rs && (op & BPF_FETCH))) {
				insn[i++] = BPF_JMP_REG(BPF_JEQ, rd, R10, 2);
				insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
				insn[i++] = BPF_EXIT_INSN();
			}

			/* Check value in memory */
			if (rs != rd) {                  /* No aliasing */
				i += __bpf_ld_imm64(&insn[i], R1, res);
			} else if (op == BPF_XCHG) {     /* Aliased, XCHG */
				insn[i++] = BPF_MOV64_REG(R1, R10);
			} else if (op == BPF_CMPXCHG) {  /* Aliased, CMPXCHG */
				i += __bpf_ld_imm64(&insn[i], R1, mem);
			} else {                        /* Aliased, ALU oper */
				i += __bpf_ld_imm64(&insn[i], R1, mem);
				insn[i++] = BPF_ALU64_REG(BPF_OP(op), R1, R10);
			}

			insn[i++] = BPF_LDX_MEM(width, R0, R10, -8);
			if (width == BPF_DW)
				insn[i++] = BPF_JMP_REG(BPF_JEQ, R0, R1, 2);
			else /* width == BPF_W */
				insn[i++] = BPF_JMP32_REG(BPF_JEQ, R0, R1, 2);
			insn[i++] = BPF_MOV32_IMM(R0, __LINE__);
			insn[i++] = BPF_EXIT_INSN();
		}
	}

	insn[i++] = BPF_MOV64_IMM(R0, 1);
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = i;
	BUG_ON(i > len);

	return 0;
}

/* 64-bit atomic register tests */
static int bpf_fill_atomic64_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD);
}

static int bpf_fill_atomic64_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND);
}

static int bpf_fill_atomic64_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR);
}

static int bpf_fill_atomic64_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR);
}

static int bpf_fill_atomic64_add_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic64_and_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic64_or_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic64_xor_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic64_xchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_XCHG);
}

static int bpf_fill_atomic64_cmpxchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_DW, BPF_CMPXCHG);
}

/* 32-bit atomic register tests */
static int bpf_fill_atomic32_add_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD);
}

static int bpf_fill_atomic32_and_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND);
}

static int bpf_fill_atomic32_or_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR);
}

static int bpf_fill_atomic32_xor_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR);
}

static int bpf_fill_atomic32_add_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_ADD | BPF_FETCH);
}

static int bpf_fill_atomic32_and_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_AND | BPF_FETCH);
}

static int bpf_fill_atomic32_or_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_OR | BPF_FETCH);
}

static int bpf_fill_atomic32_xor_fetch_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XOR | BPF_FETCH);
}

static int bpf_fill_atomic32_xchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_XCHG);
}

static int bpf_fill_atomic32_cmpxchg_reg_pairs(struct bpf_test *self)
{
	return __bpf_fill_atomic_reg_pairs(self, BPF_W, BPF_CMPXCHG);
}

/*
 * Test the two-instruction 64-bit immediate load operation for all
 * power-of-two magnitudes of the immediate operand. For each MSB, a block
 * of immediate values centered around the power-of-two MSB are tested,
 * both for positive and negative values. The test is designed to verify
 * the operation for JITs that emit different code depending on the magnitude
 * of the immediate value. This is often the case if the native instruction
 * immediate field width is narrower than 32 bits.
 */
static int bpf_fill_ld_imm64_magn(struct bpf_test *self)
{
	int block = 64; /* Increase for more tests per MSB position */
	int len = 3 + 8 * 63 * block * 2;
	struct bpf_insn *insn;
	int bit, adj, sign;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

	for (bit = 0; bit <= 62; bit++) {
		for (adj = -block / 2; adj < block / 2; adj++) {
			for (sign = -1; sign <= 1; sign += 2) {
				s64 imm = sign * ((1LL << bit) + adj);

				/* Perform operation */
				i += __bpf_ld_imm64(&insn[i], R1, imm);

				/* Load reference */
				insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
				insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3,
							  (u32)(imm >> 32));
				insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
				insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);

				/* Check result */
				insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
				insn[i++] = BPF_EXIT_INSN();
			}
		}
	}

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}

/*
 * Test the two-instruction 64-bit immediate load operation for different
 * combinations of bytes. Each byte in the 64-bit word is constructed as
 * (base & mask) | (rand() & ~mask), where rand() is a deterministic LCG.
 * All patterns of (base1, mask1) and (base2, mask2) bytes are tested.
 */
static int __bpf_fill_ld_imm64_bytes(struct bpf_test *self,
				     u8 base1, u8 mask1,
				     u8 base2, u8 mask2)
{
	struct bpf_insn *insn;
	int len = 3 + 8 * BIT(8);
	int pattern, index;
	u32 rand = 1;
	int i = 0;

	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
	if (!insn)
		return -ENOMEM;

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 0);

	for (pattern = 0; pattern < BIT(8); pattern++) {
		u64 imm = 0;

		for (index = 0; index < 8; index++) {
			int byte;

			if (pattern & BIT(index))
				byte = (base1 & mask1) | (rand & ~mask1);
			else
				byte = (base2 & mask2) | (rand & ~mask2);
			imm = (imm << 8) | byte;
		}

		/* Update our LCG */
		rand = rand * 1664525 + 1013904223;

		/* Perform operation */
		i += __bpf_ld_imm64(&insn[i], R1, imm);

		/* Load reference */
		insn[i++] = BPF_ALU32_IMM(BPF_MOV, R2, imm);
		insn[i++] = BPF_ALU32_IMM(BPF_MOV, R3, (u32)(imm >> 32));
		insn[i++] = BPF_ALU64_IMM(BPF_LSH, R3, 32);
		insn[i++] = BPF_ALU64_REG(BPF_OR, R2, R3);

		/* Check result */
		insn[i++] = BPF_JMP_REG(BPF_JEQ, R1, R2, 1);
		insn[i++] = BPF_EXIT_INSN();
	}

	insn[i++] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
	insn[i++] = BPF_EXIT_INSN();

	self->u.ptr.insns = insn;
	self->u.ptr.len = len;
	BUG_ON(i != len);

	return 0;
}

static int bpf_fill_ld_imm64_checker(struct bpf_test *self)
{
	return __bpf_fill_ld_imm64_bytes(self, 0, 0xff, 0xff, 0xff);
}

static int bpf_fill_ld_imm64_pos_neg(struct bpf_test *self)
{
	return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0x80, 0x80);
}

static int bpf_fill_ld_imm64_pos_zero(struct bpf_test *self)
{
	return __bpf_fill_ld_imm64_bytes(self, 1, 0x81, 0, 0xff);
}

static int bpf_fill_ld_imm64_neg_zero(struct bpf_test *self)
{
	return __bpf_fill_ld_imm64_bytes(self, 0x80, 0x80, 0, 0xff);
}

/*
 * Exhaustive tests of JMP operations for all combinations of power-of-two
 * magnitudes of the operands, both for positive and negative values. The
 * test is designed to verify e.g. the JMP and JMP32 operations for JITs that
 * emit different code depending on the magnitude of the immediate value.
 */

static bool __bpf_match_jmp_cond(s64 v1, s64 v2, u8 op)
{
	switch (op) {
	case BPF_JSET:
		return !!(v1 & v2);
	case BPF_JEQ:
		return v1 == v2;
	case BPF_JNE:
		return v1 != v2;
	case BPF_JGT:
		return (u64)v1 > (u64)v2;
	case BPF_JGE:
		return (u64)v1 >= (u64)v2;
	case BPF_JLT:
		return (u64)v1 < (u64)v2;
	case BPF_JLE:
		return (u64)v1 <= (u64)v2;
	case BPF_JSGT:
		return v1 > v2;
	case BPF_JSGE:
		return v1 >= v2;
	case BPF_JSLT:
		return v1 < v2;
	case BPF_JSLE:
		return v1 <= v2;
	}
	return false;
}

static int __bpf_emit_jmp_imm(struct bpf_test *self, void *arg,
			      struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;

	if (insns) {
		bool match = __bpf_match_jmp_cond(dst, (s32)imm, op);
		int i = 0;

		insns[i++] = BPF_ALU32_IMM(BPF_MOV, R0, match);

		i += __bpf_ld_imm64(&insns[i], R1, dst);
		insns[i++] = BPF_JMP_IMM(op, R1, imm, 1);
		if (!match)
			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		insns[i++] = BPF_EXIT_INSN();

		return i;
	}

	return 5 + 1;
}

static int __bpf_emit_jmp32_imm(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 imm)
{
	int op = *(int *)arg;

	if (insns) {
		bool match = __bpf_match_jmp_cond((s32)dst, (s32)imm, op);
		int i = 0;

		i += __bpf_ld_imm64(&insns[i], R1, dst);
		insns[i++] = BPF_JMP32_IMM(op, R1, imm, 1);
		if (!match)
			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		insns[i++] = BPF_EXIT_INSN();

		return i;
	}

	return 5;
}

static int __bpf_emit_jmp_reg(struct bpf_test *self, void *arg,
			      struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;

	if (insns) {
		bool match = __bpf_match_jmp_cond(dst, src, op);
		int i = 0;

		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R2, src);
		insns[i++] = BPF_JMP_REG(op, R1, R2, 1);
		if (!match)
			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		insns[i++] = BPF_EXIT_INSN();

		return i;
	}

	return 7;
}

static int __bpf_emit_jmp32_reg(struct bpf_test *self, void *arg,
				struct bpf_insn *insns, s64 dst, s64 src)
{
	int op = *(int *)arg;

	if (insns) {
		bool match = __bpf_match_jmp_cond((s32)dst, (s32)src, op);
		int i = 0;

		i += __bpf_ld_imm64(&insns[i], R1, dst);
		i += __bpf_ld_imm64(&insns[i], R2, src);
		insns[i++] = BPF_JMP32_REG(op, R1, R2, 1);
		if (!match)
			insns[i++] = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
		insns[i++] = BPF_EXIT_INSN();

		return i;
	}

	return 7;
}

static int __bpf_fill_jmp_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp_imm);
}

static int __bpf_fill_jmp32_imm(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 32,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp32_imm);
}

static int __bpf_fill_jmp_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp_reg);
}

static int __bpf_fill_jmp32_reg(struct bpf_test *self, int op)
{
	return __bpf_fill_pattern(self, &op, 64, 64,
				  PATTERN_BLOCK1, PATTERN_BLOCK2,
				  &__bpf_emit_jmp32_reg);
}

/* JMP immediate tests */
static int bpf_fill_jmp_jset_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSET);
}

static int bpf_fill_jmp_jeq_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JEQ);
}

static int bpf_fill_jmp_jne_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JNE);
}

static int bpf_fill_jmp_jgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JGT);
}

static int bpf_fill_jmp_jge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JGE);
}

static int bpf_fill_jmp_jlt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JLT);
}

static int bpf_fill_jmp_jle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JLE);
}

static int bpf_fill_jmp_jsgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSGT);
}

static int bpf_fill_jmp_jsge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSGE);
}

static int bpf_fill_jmp_jslt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSLT);
}

static int bpf_fill_jmp_jsle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp_imm(self, BPF_JSLE);
}

/* JMP32 immediate tests */
static int bpf_fill_jmp32_jset_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSET);
}

static int bpf_fill_jmp32_jeq_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JEQ);
}

static int bpf_fill_jmp32_jne_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JNE);
}

static int bpf_fill_jmp32_jgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JGT);
}

static int bpf_fill_jmp32_jge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JGE);
}

static int bpf_fill_jmp32_jlt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JLT);
}

static int bpf_fill_jmp32_jle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JLE);
}

static int bpf_fill_jmp32_jsgt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSGT);
}

static int bpf_fill_jmp32_jsge_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSGE);
}

static int bpf_fill_jmp32_jslt_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSLT);
}

static int bpf_fill_jmp32_jsle_imm(struct bpf_test *self)
{
	return __bpf_fill_jmp32_imm(self, BPF_JSLE);
}

/* JMP register tests */
static int bpf_fill_jmp_jset_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSET);
}

static int bpf_fill_jmp_jeq_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JEQ);
}

static int bpf_fill_jmp_jne_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JNE);
}

static int bpf_fill_jmp_jgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JGT);
}

static int bpf_fill_jmp_jge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JGE);
}

static int bpf_fill_jmp_jlt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JLT);
}

static int bpf_fill_jmp_jle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JLE);
}

static int bpf_fill_jmp_jsgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSGT);
}

static int bpf_fill_jmp_jsge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSGE);
}

static int bpf_fill_jmp_jslt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSLT);
}

static int bpf_fill_jmp_jsle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp_reg(self, BPF_JSLE);
}

/* JMP32 register tests */
static int bpf_fill_jmp32_jset_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSET);
}

static int bpf_fill_jmp32_jeq_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JEQ);
}

static int bpf_fill_jmp32_jne_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JNE);
}

static int bpf_fill_jmp32_jgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JGT);
}

static int bpf_fill_jmp32_jge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JGE);
}

static int bpf_fill_jmp32_jlt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JLT);
}

static int bpf_fill_jmp32_jle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JLE);
}

static int bpf_fill_jmp32_jsgt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSGT);
}

static int bpf_fill_jmp32_jsge_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSGE);
}

static int bpf_fill_jmp32_jslt_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSLT);
}

static int bpf_fill_jmp32_jsle_reg(struct bpf_test *self)
{
	return __bpf_fill_jmp32_reg(self, BPF_JSLE);
}

/*
 * Set up a sequence of staggered jumps, forwards and backwards with
 * increasing offset. This tests the conversion of relative jumps to
 * JITed native jumps. On some architectures, for example MIPS, a large
 * PC-relative jump offset may overflow the immediate field of the native
 * conditional branch instruction, triggering a conversion to use an
 * absolute jump instead. Since this changes the jump offsets, another
 * offset computation pass is necessary, and that may in turn trigger
 * another branch conversion. This jump sequence is particularly nasty
 * in that regard.
 *
 * The sequence generation is parameterized by size and jump type.
 * The size must be even, and the expected result is always size + 1.
 * Below is an example with size=8 and result=9.
 *
 *                     ________________________Start
 *                     R0 = 0
 *                     R1 = r1
 *                     R2 = r2
 *            ,------- JMP +4 * 3______________Preamble: 4 insns
 * ,----------|-ind 0- if R0 != 7 JMP 8 * 3 + 1 <--------------------.
 * |          |        R0 = 8                                        |
 * |          |        JMP +7 * 3               ------------------------.
 * | ,--------|-----1- if R0 != 5 JMP 7 * 3 + 1 <--------------.     |  |
 * | |        |        R0 = 6                                  |     |  |
 * | |        |        JMP +5 * 3               ------------------.  |  |
 * | | ,------|-----2- if R0 != 3 JMP 6 * 3 + 1 <--------.     |  |  |  |
 * | | |      |        R0 = 4                            |     |  |  |  |
 * | | |      |        JMP +3 * 3               ------------.  |  |  |  |
 * | | | ,----|-----3- if R0 != 1 JMP 5 * 3 + 1 <--.     |  |  |  |  |  |
 * | | | |    |        R0 = 2                      |     |  |  |  |  |  |
 * | | | |    |        JMP +1 * 3               ------.  |  |  |  |  |  |
 * | | | | ,--'-----4- if R0 != 0 JMP 4 * 3 + 1    1  2  3  4  5  6  7  8 loc
 * | | | | |           R0 = 1                     -1 +2 -3 +4 -5 +6 -7 +8 off
 * | | | | |           JMP -2 * 3               ---'  |  |  |  |  |  |  |
 * | | | | | ,------5- if R0 != 2 JMP 3 * 3 + 1 <-----'  |  |  |  |  |  |
 * | | | | | |         R0 = 3                            |  |  |  |  |  |
 * | | | | | |         JMP -4 * 3               ---------'  |  |  |  |  |
 * | | | | | | ,----6- if R0 != 4 JMP 2 * 3 + 1 <-----------'  |  |  |  |
 * | | | | | | |       R0 = 5                                  |  |  |  |
 * | | | | | | |       JMP -6 * 3               ---------------'  |  |  |
 * | | | | | | | ,--7- if R0 != 6 JMP 1 * 3 + 1 <-----------------'  |  |
 * | | | | | | | |     R0 = 7                                        |  |
 * | | Error | | |     JMP -8 * 3               ---------------------'  |
 * | | paths | | | ,8- if R0 != 8 JMP 0 * 3 + 1 <-----------------------'
 * | | | | | | | | |   R0 = 9__________________Sequence: 3 * size - 1 insns
 * `-+-+-+-+-+-+-+-+-> EXIT____________________Return: 1 insn
 *
 */

/* The maximum size parameter, limited by the 16-bit jump offset range */
#define MAX_STAGGERED_JMP_SIZE ((0x7fff / 3) & ~1)

/* We use a reduced number of iterations to get a reasonable execution time */
#define NR_STAGGERED_JMP_RUNS 10

static int __bpf_fill_staggered_jumps(struct bpf_test *self,
				      const struct bpf_insn *jmp,
				      u64 r1, u64 r2)
{
	int size = self->test[0].result - 1;
	int len = 4 + 3 * (size + 1);
	struct bpf_insn *insns;
	int off, ind;

	insns = kmalloc_array(len, sizeof(*insns), GFP_KERNEL);
	if (!insns)
		return -ENOMEM;

	/* Preamble */
	insns[0] = BPF_ALU64_IMM(BPF_MOV, R0, 0);
	insns[1] = BPF_ALU64_IMM(BPF_MOV, R1, r1);
	insns[2] = BPF_ALU64_IMM(BPF_MOV, R2, r2);
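	/* Enter the sequence at its middle block, index size / 2 */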
	insns[3] = BPF_JMP_IMM(BPF_JA, 0, 0, 3 * size / 2);

	/* Sequence: each block verifies R0, sets the next value and jumps on */
	for (ind = 0, off = size; ind <= size; ind++, off -= 2) {
		struct bpf_insn *ins = &insns[4 + 3 * ind];
		int loc;

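		/* Skip offset zero so that all location values stay unique */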
		if (off == 0)
			off--;

		loc = abs(off);
		ins[0] = BPF_JMP_IMM(BPF_JNE, R0, loc - 1,
				     3 * (size - ind) + 1);
		ins[1] = BPF_ALU64_IMM(BPF_MOV, R0, loc);
		ins[2] = *jmp;
		ins[2].off = 3 * (off - 1);
	}

	/* Return */
	insns[len - 1] = BPF_EXIT_INSN();

	self->u.ptr.insns = insns;
	self->u.ptr.len = len;

	return 0;
}

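/*
 * The wrappers below choose r1/r2 operand values for which the jump under
 * test is always taken, so that every staggered jump in the sequence is
 * exercised.
 */
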
/* 64-bit unconditional jump */
static int bpf_fill_staggered_ja(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JA, 0, 0, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0, 0);
}

/* 64-bit immediate jumps */
static int bpf_fill_staggered_jeq_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JEQ, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jne_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JNE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
}

static int bpf_fill_staggered_jset_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSET, R1, 0x82, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
}

static int bpf_fill_staggered_jgt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGT, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
}

static int bpf_fill_staggered_jge_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JGE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jlt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLT, R1, 0x80000000, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jle_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JLE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jsgt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGT, R1, -2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}

static int bpf_fill_staggered_jsge_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSGE, R1, -2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}

static int bpf_fill_staggered_jslt_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLT, R1, -1, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}

static int bpf_fill_staggered_jsle_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_IMM(BPF_JSLE, R1, -1, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}

/* 64-bit register jumps */
static int bpf_fill_staggered_jeq_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JEQ, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jne_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JNE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
}

static int bpf_fill_staggered_jset_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSET, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
}

static int bpf_fill_staggered_jgt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JGT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
}

static int bpf_fill_staggered_jge_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JGE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jlt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JLT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
}

static int bpf_fill_staggered_jle_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JLE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jsgt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
}

static int bpf_fill_staggered_jsge_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSGE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
}

static int bpf_fill_staggered_jslt_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
}

static int bpf_fill_staggered_jsle_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP_REG(BPF_JSLE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
}

/* 32-bit immediate jumps */
static int bpf_fill_staggered_jeq32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JEQ, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jne32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JNE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 0);
}

static int bpf_fill_staggered_jset32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSET, R1, 0x82, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0);
}

static int bpf_fill_staggered_jgt32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGT, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 0);
}

static int bpf_fill_staggered_jge32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JGE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jlt32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLT, R1, 0x80000000, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jle32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JLE, R1, 1234, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0);
}

static int bpf_fill_staggered_jsgt32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGT, R1, -2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}

static int bpf_fill_staggered_jsge32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSGE, R1, -2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}

static int bpf_fill_staggered_jslt32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLT, R1, -1, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, 0);
}

static int bpf_fill_staggered_jsle32_imm(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_IMM(BPF_JSLE, R1, -1, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, 0);
}

/* 32-bit register jumps */
static int bpf_fill_staggered_jeq32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JEQ, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jne32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JNE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 4321, 1234);
}

static int bpf_fill_staggered_jset32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSET, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x86, 0x82);
}

static int bpf_fill_staggered_jgt32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 0x80000000, 1234);
}

static int bpf_fill_staggered_jge32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JGE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jlt32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 0x80000000);
}

static int bpf_fill_staggered_jle32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JLE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, 1234, 1234);
}

static int bpf_fill_staggered_jsgt32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, -2);
}

static int bpf_fill_staggered_jsge32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSGE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, -2);
}

static int bpf_fill_staggered_jslt32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLT, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -2, -1);
}

static int bpf_fill_staggered_jsle32_reg(struct bpf_test *self)
{
	struct bpf_insn jmp = BPF_JMP32_REG(BPF_JSLE, R1, R2, 0);

	return __bpf_fill_staggered_jumps(self, &jmp, -1, -1);
}


static struct bpf_test tests[] = {
	{
		"TAX",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_ALU | BPF_NEG, 0), /* A == -3 */
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_LEN, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_MISC | BPF_TAX, 0), /* X == len - 3 */
			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 1),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ 10, 20, 30, 40, 50 },
		{ { 2, 10 }, { 3, 20 }, { 4, 30 } },
	},
	{
		"TXA",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_RET | BPF_A, 0) /* A == len * 2 */
		},
		CLASSIC,
		{ 10, 20, 30, 40, 50 },
		{ { 1, 2 }, { 3, 6 }, { 4, 8 } },
	},
	{
		"ADD_SUB_MUL_K",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 1),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 2),
			BPF_STMT(BPF_LDX | BPF_IMM, 3),
			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 0xffffffff),
			BPF_STMT(BPF_ALU | BPF_MUL | BPF_K, 3),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC | FLAG_NO_DATA,
		{ },
		{ { 0, 0xfffffffd } }
	},
	{
		"DIV_MOD_KX",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 8),
			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 2),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
			BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0x70000000),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
			BPF_STMT(BPF_ALU | BPF_MOD | BPF_X, 0),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff),
			BPF_STMT(BPF_ALU | BPF_MOD | BPF_K, 0x70000000),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC | FLAG_NO_DATA,
		{ },
		{ { 0, 0x20000000 } }
	},
	{
		"AND_OR_LSH_K",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 0xff),
			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
			BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 27),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_IMM, 0xf),
			BPF_STMT(BPF_ALU | BPF_OR | BPF_K, 0xf0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC | FLAG_NO_DATA,
		{ },
		{ { 0, 0x800000ff }, { 1, 0x800000ff } },
	},
	{
		"LD_IMM_0",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 0), /* ld #0 */
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 0),
			BPF_STMT(BPF_RET | BPF_K, 1),
		},
		CLASSIC,
		{ },
		{ { 1, 1 } },
	},
	{
		"LD_IND",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_IND, MAX_K),
			BPF_STMT(BPF_RET | BPF_K, 1)
		},
		CLASSIC,
		{ },
		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
	},
	{
		"LD_ABS",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS, 1000),
			BPF_STMT(BPF_RET | BPF_K, 1)
		},
		CLASSIC,
		{ },
		{ { 1, 0 }, { 10, 0 }, { 60, 0 } },
	},
	{
		"LD_ABS_LL",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 1),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ 1, 2, 3 },
		{ { 1, 0 }, { 2, 3 } },
	},
	{
		"LD_IND_LL",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, SKF_LL_OFF - 1),
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ 1, 2, 3, 0xff },
		{ { 1, 1 }, { 3, 3 }, { 4, 0xff } },
	},
	{
		"LD_ABS_NET",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
		{ { 15, 0 }, { 16, 3 } },
	},
	{
		"LD_IND_NET",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, SKF_NET_OFF - 15),
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 },
		{ { 14, 0 }, { 15, 1 }, { 17, 3 } },
	},
	{
		"LD_PKTTYPE",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PKTTYPE),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 1),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PKTTYPE),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 1),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PKTTYPE),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SKB_TYPE, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 1),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, 3 }, { 10, 3 } },
	},
	{
		"LD_MARK",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_MARK),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, SKB_MARK}, { 10, SKB_MARK} },
	},
	{
		"LD_RXHASH",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_RXHASH),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, SKB_HASH}, { 10, SKB_HASH} },
	},
	{
		"LD_QUEUE",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_QUEUE),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, SKB_QUEUE_MAP }, { 10, SKB_QUEUE_MAP } },
	},
	{
		"LD_PROTOCOL",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 1),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 20, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 0),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PROTOCOL),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 30, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 0),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ 10, 20, 30 },
		{ { 10, ETH_P_IP }, { 100, ETH_P_IP } },
	},
	{
		"LD_VLAN_TAG",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_VLAN_TAG),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{
			{ 1, SKB_VLAN_TCI },
			{ 10, SKB_VLAN_TCI }
		},
	},
	{
		"LD_VLAN_TAG_PRESENT",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{
			{ 1, SKB_VLAN_PRESENT },
			{ 10, SKB_VLAN_PRESENT }
		},
	},
	{
		"LD_IFINDEX",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_IFINDEX),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, SKB_DEV_IFINDEX }, { 10, SKB_DEV_IFINDEX } },
	},
	{
		"LD_HATYPE",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_HATYPE),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, SKB_DEV_TYPE }, { 10, SKB_DEV_TYPE } },
	},
	{
		"LD_CPU",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_CPU),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_CPU),
			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, 0 }, { 10, 0 } },
	},
	{
		"LD_NLATTR",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_IMM, 2),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_LDX | BPF_IMM, 3),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
#ifdef __BIG_ENDIAN
		{ 0xff, 0xff, 0, 4, 0, 2, 0, 4, 0, 3 },
#else
		{ 0xff, 0xff, 4, 0, 2, 0, 4, 0, 3, 0 },
#endif
		{ { 4, 0 }, { 20, 6 } },
	},
	{
		"LD_NLATTR_NEST",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LDX | BPF_IMM, 3),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_NLATTR_NEST),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
#ifdef __BIG_ENDIAN
		{ 0xff, 0xff, 0, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3 },
#else
		{ 0xff, 0xff, 12, 0, 1, 0, 4, 0, 2, 0, 4, 0, 3, 0 },
#endif
		{ { 4, 0 }, { 20, 10 } },
	},
	{
		"LD_PAYLOAD_OFF",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_PAY_OFFSET),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		/* 00:00:00:00:00:00 > 00:00:00:00:00:00, ethtype IPv4 (0x0800),
		 * length 98: 127.0.0.1 > 127.0.0.1: ICMP echo request,
		 * id 9737, seq 1, length 64
		 */
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x08, 0x00,
		  0x45, 0x00, 0x00, 0x54, 0xac, 0x8b, 0x40, 0x00, 0x40,
		  0x01, 0x90, 0x1b, 0x7f, 0x00, 0x00, 0x01 },
		{ { 30, 0 }, { 100, 42 } },
	},
	{
		"LD_ANC_XOR",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 10),
			BPF_STMT(BPF_LDX | BPF_IMM, 300),
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_ALU_XOR_X),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } },
	},
	{
		"SPILL_FILL",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_LD | BPF_IMM, 2),
			BPF_STMT(BPF_ALU | BPF_RSH, 1),
			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
			BPF_STMT(BPF_ST, 1), /* M1 = 1 ^ len */
			BPF_STMT(BPF_ALU | BPF_XOR | BPF_K, 0x80000000),
			BPF_STMT(BPF_ST, 2), /* M2 = 1 ^ len ^ 0x80000000 */
			BPF_STMT(BPF_STX, 15), /* M3 = len */
			BPF_STMT(BPF_LDX | BPF_MEM, 1),
			BPF_STMT(BPF_LD | BPF_MEM, 2),
			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 15),
			BPF_STMT(BPF_ALU | BPF_XOR | BPF_X, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ { 1, 0x80000001 }, { 2, 0x80000002 }, { 60, 0x80000000 ^ 60 } }
	},
	{
		"JEQ",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 1),
			BPF_STMT(BPF_RET | BPF_K, MAX_K)
		},
		CLASSIC,
		{ 3, 3, 3, 3, 3 },
		{ { 1, 0 }, { 3, 1 }, { 4, MAX_K } },
	},
	{
		"JGT",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
			BPF_JUMP(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 1),
			BPF_STMT(BPF_RET | BPF_K, MAX_K)
		},
		CLASSIC,
		{ 4, 4, 4, 3, 3 },
		{ { 2, 0 }, { 3, 1 }, { 4, MAX_K } },
	},
	{
		"JGE (jt 0), test 1",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 1),
			BPF_STMT(BPF_RET | BPF_K, MAX_K)
		},
		CLASSIC,
		{ 4, 4, 4, 3, 3 },
		{ { 2, 0 }, { 3, 1 }, { 4, 1 } },
	},
	{
		"JGE (jt 0), test 2",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2),
			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 1),
			BPF_STMT(BPF_RET | BPF_K, MAX_K)
		},
		CLASSIC,
		{ 4, 4, 5, 3, 3 },
		{ { 4, 1 }, { 5, 1 }, { 6, MAX_K } },
	},
	{
		"JGE",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_LD | BPF_B | BPF_IND, MAX_K),
			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 1, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 10),
			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 2, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 20),
			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 3, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 30),
			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 4, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 40),
			BPF_STMT(BPF_RET | BPF_K, MAX_K)
		},
		CLASSIC,
		{ 1, 2, 3, 4, 5 },
		{ { 1, 20 }, { 3, 40 }, { 5, MAX_K } },
	},
	{
		"JSET",
		.u.insns = {
			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
			BPF_JUMP(BPF_JMP | BPF_JA, 1, 1, 1),
			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
			BPF_JUMP(BPF_JMP | BPF_JA, 0, 0, 0),
			BPF_STMT(BPF_LDX | BPF_LEN, 0),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_SUB | BPF_K, 4),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_LD | BPF_W | BPF_IND, 0),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 1, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 10),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x80000000, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 20),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 30),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 30),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 30),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 30),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0xffffff, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 30),
			BPF_STMT(BPF_RET | BPF_K, MAX_K)
		},
		CLASSIC,
		{ 0, 0xAA, 0x55, 1 },
		{ { 4, 10 }, { 5, 20 }, { 6, MAX_K } },
	},
	{
		"tcpdump port 22",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 0, 8), /* IPv6 */
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 20),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 17),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 54),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 14, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 56),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 12, 13),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 12), /* IPv4 */
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x84, 2, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 1, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x11, 0, 8),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 6, 0),
			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, 0xffff),
			BPF_STMT(BPF_RET | BPF_K, 0),
		},
		CLASSIC,
		/* 3c:07:54:43:e5:76 > 10:bf:48:d6:43:d6, ethertype IPv4(0x0800)
		 * length 114: 10.1.1.149.49700 > 10.1.2.10.22: Flags [P.],
		 * seq 1305692979:1305693027, ack 3650467037, win 65535,
		 * options [nop,nop,TS val 2502645400 ecr 3971138], length 48
		 */
		{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
		  0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
		  0x08, 0x00,
		  0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
		  0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
		  0x0a, 0x01, 0x01, 0x95, /* ip src */
		  0x0a, 0x01, 0x02, 0x0a, /* ip dst */
		  0xc2, 0x24,
		  0x00, 0x16 /* dst port */ },
		{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
	},
	{
		"tcpdump complex",
		.u.insns = {
			/* tcpdump -nei eth0 'tcp port 22 and (((ip[2:2] -
			 * ((ip[0]&0xf)<<2)) - ((tcp[12]&0xf0)>>2)) != 0) and
			 * (len > 115 or len < 30000000000)' -d
			 */
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x86dd, 30, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x800, 0, 29),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x6, 0, 27),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 20),
			BPF_JUMP(BPF_JMP | BPF_JSET | BPF_K, 0x1fff, 25, 0),
			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 14),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 2, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_IND, 16),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 22, 0, 20),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 16),
			BPF_STMT(BPF_ST, 1),
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 14),
			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf),
			BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 2),
			BPF_STMT(BPF_MISC | BPF_TAX, 0x5), /* libpcap emits K on TAX */
			BPF_STMT(BPF_LD | BPF_MEM, 1),
			BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0),
			BPF_STMT(BPF_ST, 5),
			BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 14),
			BPF_STMT(BPF_LD | BPF_B | BPF_IND, 26),
			BPF_STMT(BPF_ALU | BPF_AND | BPF_K, 0xf0),
			BPF_STMT(BPF_ALU | BPF_RSH | BPF_K, 2),
			BPF_STMT(BPF_MISC | BPF_TAX, 0x9), /* libpcap emits K on TAX */
			BPF_STMT(BPF_LD | BPF_MEM, 5),
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_X, 0, 4, 0),
			BPF_STMT(BPF_LD | BPF_LEN, 0),
			BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0x73, 1, 0),
			BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 0xfc23ac00, 1, 0),
			BPF_STMT(BPF_RET | BPF_K, 0xffff),
			BPF_STMT(BPF_RET | BPF_K, 0),
		},
		CLASSIC,
		{ 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6,
		  0x3c, 0x07, 0x54, 0x43, 0xe5, 0x76,
		  0x08, 0x00,
		  0x45, 0x10, 0x00, 0x64, 0x75, 0xb5,
		  0x40, 0x00, 0x40, 0x06, 0xad, 0x2e, /* IP header */
		  0x0a, 0x01, 0x01, 0x95, /* ip src */
		  0x0a, 0x01, 0x02, 0x0a, /* ip dst */
		  0xc2, 0x24,
		  0x00, 0x16 /* dst port */ },
		{ { 10, 0 }, { 30, 0 }, { 100, 65535 } },
	},
	{
		"RET_A",
		.u.insns = {
			/* check that uninitialized X and A contain zeros */
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_RET | BPF_A, 0)
		},
		CLASSIC,
		{ },
		{ {1, 0}, {2, 0} },
	},
	{
		"INT: ADD trivial",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R1, 1),
			BPF_ALU64_IMM(BPF_ADD, R1, 2),
			BPF_ALU64_IMM(BPF_MOV, R2, 3),
			BPF_ALU64_REG(BPF_SUB, R1, R2),
			BPF_ALU64_IMM(BPF_ADD, R1, -1),
			BPF_ALU64_IMM(BPF_MUL, R1, 3),
			BPF_ALU64_REG(BPF_MOV, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xfffffffd } }
	},
	{
		"INT: MUL_X",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, -1),
			BPF_ALU64_IMM(BPF_MOV, R1, -1),
			BPF_ALU64_IMM(BPF_MOV, R2, 3),
			BPF_ALU64_REG(BPF_MUL, R1, R2),
			BPF_JMP_IMM(BPF_JEQ, R1, 0xfffffffd, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } }
	},
	{
		"INT: MUL_X2",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, -1),
			BPF_ALU32_IMM(BPF_MOV, R1, -1),
			BPF_ALU32_IMM(BPF_MOV, R2, 3),
			BPF_ALU64_REG(BPF_MUL, R1, R2),
			BPF_ALU64_IMM(BPF_RSH, R1, 8),
			BPF_JMP_IMM(BPF_JEQ, R1, 0x2ffffff, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } }
	},
	{
		"INT: MUL32_X",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, -1),
			BPF_ALU64_IMM(BPF_MOV, R1, -1),
			BPF_ALU32_IMM(BPF_MOV, R2, 3),
			BPF_ALU32_REG(BPF_MUL, R1, R2),
			BPF_ALU64_IMM(BPF_RSH, R1, 8),
			BPF_JMP_IMM(BPF_JEQ, R1, 0xffffff, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } }
	},
	{
		/* Have to test all register combinations, since
		 * JITing of different registers will produce
		 * different asm code.
		 */
		"INT: ADD 64-bit",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, 0),
			BPF_ALU64_IMM(BPF_MOV, R1, 1),
			BPF_ALU64_IMM(BPF_MOV, R2, 2),
			BPF_ALU64_IMM(BPF_MOV, R3, 3),
			BPF_ALU64_IMM(BPF_MOV, R4, 4),
			BPF_ALU64_IMM(BPF_MOV, R5, 5),
			BPF_ALU64_IMM(BPF_MOV, R6, 6),
			BPF_ALU64_IMM(BPF_MOV, R7, 7),
			BPF_ALU64_IMM(BPF_MOV, R8, 8),
			BPF_ALU64_IMM(BPF_MOV, R9, 9),
			BPF_ALU64_IMM(BPF_ADD, R0, 20),
			BPF_ALU64_IMM(BPF_ADD, R1, 20),
			BPF_ALU64_IMM(BPF_ADD, R2, 20),
			BPF_ALU64_IMM(BPF_ADD, R3, 20),
			BPF_ALU64_IMM(BPF_ADD, R4, 20),
			BPF_ALU64_IMM(BPF_ADD, R5, 20),
			BPF_ALU64_IMM(BPF_ADD, R6, 20),
			BPF_ALU64_IMM(BPF_ADD, R7, 20),
			BPF_ALU64_IMM(BPF_ADD, R8, 20),
			BPF_ALU64_IMM(BPF_ADD, R9, 20),
			BPF_ALU64_IMM(BPF_SUB, R0, 10),
			BPF_ALU64_IMM(BPF_SUB, R1, 10),
			BPF_ALU64_IMM(BPF_SUB, R2, 10),
			BPF_ALU64_IMM(BPF_SUB, R3, 10),
			BPF_ALU64_IMM(BPF_SUB, R4, 10),
			BPF_ALU64_IMM(BPF_SUB, R5, 10),
			BPF_ALU64_IMM(BPF_SUB, R6, 10),
			BPF_ALU64_IMM(BPF_SUB, R7, 10),
			BPF_ALU64_IMM(BPF_SUB, R8, 10),
			BPF_ALU64_IMM(BPF_SUB, R9, 10),
			BPF_ALU64_REG(BPF_ADD, R0, R0),
			BPF_ALU64_REG(BPF_ADD, R0, R1),
			BPF_ALU64_REG(BPF_ADD, R0, R2),
			BPF_ALU64_REG(BPF_ADD, R0, R3),
			BPF_ALU64_REG(BPF_ADD, R0, R4),
			BPF_ALU64_REG(BPF_ADD, R0, R5),
			BPF_ALU64_REG(BPF_ADD, R0, R6),
			BPF_ALU64_REG(BPF_ADD, R0, R7),
			BPF_ALU64_REG(BPF_ADD, R0, R8),
			BPF_ALU64_REG(BPF_ADD, R0, R9), /* R0 == 155 */
			BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R1, R0),
			BPF_ALU64_REG(BPF_ADD, R1, R1),
			BPF_ALU64_REG(BPF_ADD, R1, R2),
			BPF_ALU64_REG(BPF_ADD, R1, R3),
			BPF_ALU64_REG(BPF_ADD, R1, R4),
			BPF_ALU64_REG(BPF_ADD, R1, R5),
			BPF_ALU64_REG(BPF_ADD, R1, R6),
			BPF_ALU64_REG(BPF_ADD, R1, R7),
			BPF_ALU64_REG(BPF_ADD, R1, R8),
			BPF_ALU64_REG(BPF_ADD, R1, R9), /* R1 == 456 */
			BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R2, R0),
			BPF_ALU64_REG(BPF_ADD, R2, R1),
			BPF_ALU64_REG(BPF_ADD, R2, R2),
			BPF_ALU64_REG(BPF_ADD, R2, R3),
			BPF_ALU64_REG(BPF_ADD, R2, R4),
			BPF_ALU64_REG(BPF_ADD, R2, R5),
			BPF_ALU64_REG(BPF_ADD, R2, R6),
			BPF_ALU64_REG(BPF_ADD, R2, R7),
			BPF_ALU64_REG(BPF_ADD, R2, R8),
			BPF_ALU64_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
			BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R3, R0),
			BPF_ALU64_REG(BPF_ADD, R3, R1),
			BPF_ALU64_REG(BPF_ADD, R3, R2),
			BPF_ALU64_REG(BPF_ADD, R3, R3),
			BPF_ALU64_REG(BPF_ADD, R3, R4),
			BPF_ALU64_REG(BPF_ADD, R3, R5),
			BPF_ALU64_REG(BPF_ADD, R3, R6),
			BPF_ALU64_REG(BPF_ADD, R3, R7),
			BPF_ALU64_REG(BPF_ADD, R3, R8),
			BPF_ALU64_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
			BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R4, R0),
			BPF_ALU64_REG(BPF_ADD, R4, R1),
			BPF_ALU64_REG(BPF_ADD, R4, R2),
			BPF_ALU64_REG(BPF_ADD, R4, R3),
			BPF_ALU64_REG(BPF_ADD, R4, R4),
			BPF_ALU64_REG(BPF_ADD, R4, R5),
			BPF_ALU64_REG(BPF_ADD, R4, R6),
			BPF_ALU64_REG(BPF_ADD, R4, R7),
			BPF_ALU64_REG(BPF_ADD, R4, R8),
			BPF_ALU64_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
			BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R5, R0),
			BPF_ALU64_REG(BPF_ADD, R5, R1),
			BPF_ALU64_REG(BPF_ADD, R5, R2),
			BPF_ALU64_REG(BPF_ADD, R5, R3),
			BPF_ALU64_REG(BPF_ADD, R5, R4),
			BPF_ALU64_REG(BPF_ADD, R5, R5),
			BPF_ALU64_REG(BPF_ADD, R5, R6),
			BPF_ALU64_REG(BPF_ADD, R5, R7),
			BPF_ALU64_REG(BPF_ADD, R5, R8),
			BPF_ALU64_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
			BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R6, R0),
			BPF_ALU64_REG(BPF_ADD, R6, R1),
			BPF_ALU64_REG(BPF_ADD, R6, R2),
			BPF_ALU64_REG(BPF_ADD, R6, R3),
			BPF_ALU64_REG(BPF_ADD, R6, R4),
			BPF_ALU64_REG(BPF_ADD, R6, R5),
			BPF_ALU64_REG(BPF_ADD, R6, R6),
			BPF_ALU64_REG(BPF_ADD, R6, R7),
			BPF_ALU64_REG(BPF_ADD, R6, R8),
			BPF_ALU64_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
			BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R7, R0),
			BPF_ALU64_REG(BPF_ADD, R7, R1),
			BPF_ALU64_REG(BPF_ADD, R7, R2),
			BPF_ALU64_REG(BPF_ADD, R7, R3),
			BPF_ALU64_REG(BPF_ADD, R7, R4),
			BPF_ALU64_REG(BPF_ADD, R7, R5),
			BPF_ALU64_REG(BPF_ADD, R7, R6),
			BPF_ALU64_REG(BPF_ADD, R7, R7),
			BPF_ALU64_REG(BPF_ADD, R7, R8),
			BPF_ALU64_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
			BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R8, R0),
			BPF_ALU64_REG(BPF_ADD, R8, R1),
			BPF_ALU64_REG(BPF_ADD, R8, R2),
			BPF_ALU64_REG(BPF_ADD, R8, R3),
			BPF_ALU64_REG(BPF_ADD, R8, R4),
			BPF_ALU64_REG(BPF_ADD, R8, R5),
			BPF_ALU64_REG(BPF_ADD, R8, R6),
			BPF_ALU64_REG(BPF_ADD, R8, R7),
			BPF_ALU64_REG(BPF_ADD, R8, R8),
			BPF_ALU64_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
			BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_ADD, R9, R0),
			BPF_ALU64_REG(BPF_ADD, R9, R1),
			BPF_ALU64_REG(BPF_ADD, R9, R2),
			BPF_ALU64_REG(BPF_ADD, R9, R3),
			BPF_ALU64_REG(BPF_ADD, R9, R4),
			BPF_ALU64_REG(BPF_ADD, R9, R5),
			BPF_ALU64_REG(BPF_ADD, R9, R6),
			BPF_ALU64_REG(BPF_ADD, R9, R7),
			BPF_ALU64_REG(BPF_ADD, R9, R8),
			BPF_ALU64_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
			BPF_ALU64_REG(BPF_MOV, R0, R9),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2957380 } }
	},
	{
		"INT: ADD 32-bit",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, 20),
			BPF_ALU32_IMM(BPF_MOV, R1, 1),
			BPF_ALU32_IMM(BPF_MOV, R2, 2),
			BPF_ALU32_IMM(BPF_MOV, R3, 3),
			BPF_ALU32_IMM(BPF_MOV, R4, 4),
			BPF_ALU32_IMM(BPF_MOV, R5, 5),
			BPF_ALU32_IMM(BPF_MOV, R6, 6),
			BPF_ALU32_IMM(BPF_MOV, R7, 7),
			BPF_ALU32_IMM(BPF_MOV, R8, 8),
			BPF_ALU32_IMM(BPF_MOV, R9, 9),
			BPF_ALU64_IMM(BPF_ADD, R1, 10),
			BPF_ALU64_IMM(BPF_ADD, R2, 10),
			BPF_ALU64_IMM(BPF_ADD, R3, 10),
			BPF_ALU64_IMM(BPF_ADD, R4, 10),
			BPF_ALU64_IMM(BPF_ADD, R5, 10),
			BPF_ALU64_IMM(BPF_ADD, R6, 10),
			BPF_ALU64_IMM(BPF_ADD, R7, 10),
			BPF_ALU64_IMM(BPF_ADD, R8, 10),
			BPF_ALU64_IMM(BPF_ADD, R9, 10),
			BPF_ALU32_REG(BPF_ADD, R0, R1),
			BPF_ALU32_REG(BPF_ADD, R0, R2),
			BPF_ALU32_REG(BPF_ADD, R0, R3),
			BPF_ALU32_REG(BPF_ADD, R0, R4),
			BPF_ALU32_REG(BPF_ADD, R0, R5),
			BPF_ALU32_REG(BPF_ADD, R0, R6),
			BPF_ALU32_REG(BPF_ADD, R0, R7),
			BPF_ALU32_REG(BPF_ADD, R0, R8),
			BPF_ALU32_REG(BPF_ADD, R0, R9), /* R0 == 155 */
			BPF_JMP_IMM(BPF_JEQ, R0, 155, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R1, R0),
			BPF_ALU32_REG(BPF_ADD, R1, R1),
			BPF_ALU32_REG(BPF_ADD, R1, R2),
			BPF_ALU32_REG(BPF_ADD, R1, R3),
			BPF_ALU32_REG(BPF_ADD, R1, R4),
			BPF_ALU32_REG(BPF_ADD, R1, R5),
			BPF_ALU32_REG(BPF_ADD, R1, R6),
			BPF_ALU32_REG(BPF_ADD, R1, R7),
			BPF_ALU32_REG(BPF_ADD, R1, R8),
			BPF_ALU32_REG(BPF_ADD, R1, R9), /* R1 == 456 */
			BPF_JMP_IMM(BPF_JEQ, R1, 456, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R2, R0),
			BPF_ALU32_REG(BPF_ADD, R2, R1),
			BPF_ALU32_REG(BPF_ADD, R2, R2),
			BPF_ALU32_REG(BPF_ADD, R2, R3),
			BPF_ALU32_REG(BPF_ADD, R2, R4),
			BPF_ALU32_REG(BPF_ADD, R2, R5),
			BPF_ALU32_REG(BPF_ADD, R2, R6),
			BPF_ALU32_REG(BPF_ADD, R2, R7),
			BPF_ALU32_REG(BPF_ADD, R2, R8),
			BPF_ALU32_REG(BPF_ADD, R2, R9), /* R2 == 1358 */
			BPF_JMP_IMM(BPF_JEQ, R2, 1358, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R3, R0),
			BPF_ALU32_REG(BPF_ADD, R3, R1),
			BPF_ALU32_REG(BPF_ADD, R3, R2),
			BPF_ALU32_REG(BPF_ADD, R3, R3),
			BPF_ALU32_REG(BPF_ADD, R3, R4),
			BPF_ALU32_REG(BPF_ADD, R3, R5),
			BPF_ALU32_REG(BPF_ADD, R3, R6),
			BPF_ALU32_REG(BPF_ADD, R3, R7),
			BPF_ALU32_REG(BPF_ADD, R3, R8),
			BPF_ALU32_REG(BPF_ADD, R3, R9), /* R3 == 4063 */
			BPF_JMP_IMM(BPF_JEQ, R3, 4063, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R4, R0),
			BPF_ALU32_REG(BPF_ADD, R4, R1),
			BPF_ALU32_REG(BPF_ADD, R4, R2),
			BPF_ALU32_REG(BPF_ADD, R4, R3),
			BPF_ALU32_REG(BPF_ADD, R4, R4),
			BPF_ALU32_REG(BPF_ADD, R4, R5),
			BPF_ALU32_REG(BPF_ADD, R4, R6),
			BPF_ALU32_REG(BPF_ADD, R4, R7),
			BPF_ALU32_REG(BPF_ADD, R4, R8),
			BPF_ALU32_REG(BPF_ADD, R4, R9), /* R4 == 12177 */
			BPF_JMP_IMM(BPF_JEQ, R4, 12177, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R5, R0),
			BPF_ALU32_REG(BPF_ADD, R5, R1),
			BPF_ALU32_REG(BPF_ADD, R5, R2),
			BPF_ALU32_REG(BPF_ADD, R5, R3),
			BPF_ALU32_REG(BPF_ADD, R5, R4),
			BPF_ALU32_REG(BPF_ADD, R5, R5),
			BPF_ALU32_REG(BPF_ADD, R5, R6),
			BPF_ALU32_REG(BPF_ADD, R5, R7),
			BPF_ALU32_REG(BPF_ADD, R5, R8),
			BPF_ALU32_REG(BPF_ADD, R5, R9), /* R5 == 36518 */
			BPF_JMP_IMM(BPF_JEQ, R5, 36518, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R6, R0),
			BPF_ALU32_REG(BPF_ADD, R6, R1),
			BPF_ALU32_REG(BPF_ADD, R6, R2),
			BPF_ALU32_REG(BPF_ADD, R6, R3),
			BPF_ALU32_REG(BPF_ADD, R6, R4),
			BPF_ALU32_REG(BPF_ADD, R6, R5),
			BPF_ALU32_REG(BPF_ADD, R6, R6),
			BPF_ALU32_REG(BPF_ADD, R6, R7),
			BPF_ALU32_REG(BPF_ADD, R6, R8),
			BPF_ALU32_REG(BPF_ADD, R6, R9), /* R6 == 109540 */
			BPF_JMP_IMM(BPF_JEQ, R6, 109540, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R7, R0),
			BPF_ALU32_REG(BPF_ADD, R7, R1),
			BPF_ALU32_REG(BPF_ADD, R7, R2),
			BPF_ALU32_REG(BPF_ADD, R7, R3),
			BPF_ALU32_REG(BPF_ADD, R7, R4),
			BPF_ALU32_REG(BPF_ADD, R7, R5),
			BPF_ALU32_REG(BPF_ADD, R7, R6),
			BPF_ALU32_REG(BPF_ADD, R7, R7),
			BPF_ALU32_REG(BPF_ADD, R7, R8),
			BPF_ALU32_REG(BPF_ADD, R7, R9), /* R7 == 328605 */
			BPF_JMP_IMM(BPF_JEQ, R7, 328605, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R8, R0),
			BPF_ALU32_REG(BPF_ADD, R8, R1),
			BPF_ALU32_REG(BPF_ADD, R8, R2),
			BPF_ALU32_REG(BPF_ADD, R8, R3),
			BPF_ALU32_REG(BPF_ADD, R8, R4),
			BPF_ALU32_REG(BPF_ADD, R8, R5),
			BPF_ALU32_REG(BPF_ADD, R8, R6),
			BPF_ALU32_REG(BPF_ADD, R8, R7),
			BPF_ALU32_REG(BPF_ADD, R8, R8),
			BPF_ALU32_REG(BPF_ADD, R8, R9), /* R8 == 985799 */
			BPF_JMP_IMM(BPF_JEQ, R8, 985799, 1),
			BPF_EXIT_INSN(),
			BPF_ALU32_REG(BPF_ADD, R9, R0),
			BPF_ALU32_REG(BPF_ADD, R9, R1),
			BPF_ALU32_REG(BPF_ADD, R9, R2),
			BPF_ALU32_REG(BPF_ADD, R9, R3),
			BPF_ALU32_REG(BPF_ADD, R9, R4),
			BPF_ALU32_REG(BPF_ADD, R9, R5),
			BPF_ALU32_REG(BPF_ADD, R9, R6),
			BPF_ALU32_REG(BPF_ADD, R9, R7),
			BPF_ALU32_REG(BPF_ADD, R9, R8),
			BPF_ALU32_REG(BPF_ADD, R9, R9), /* R9 == 2957380 */
			BPF_ALU32_REG(BPF_MOV, R0, R9),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2957380 } }
	},
	{	/* Mainly checking JIT here. */
		"INT: SUB",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, 0),
			BPF_ALU64_IMM(BPF_MOV, R1, 1),
			BPF_ALU64_IMM(BPF_MOV, R2, 2),
			BPF_ALU64_IMM(BPF_MOV, R3, 3),
			BPF_ALU64_IMM(BPF_MOV, R4, 4),
			BPF_ALU64_IMM(BPF_MOV, R5, 5),
			BPF_ALU64_IMM(BPF_MOV, R6, 6),
			BPF_ALU64_IMM(BPF_MOV, R7, 7),
			BPF_ALU64_IMM(BPF_MOV, R8, 8),
			BPF_ALU64_IMM(BPF_MOV, R9, 9),
			BPF_ALU64_REG(BPF_SUB, R0, R0),
			BPF_ALU64_REG(BPF_SUB, R0, R1),
			BPF_ALU64_REG(BPF_SUB, R0, R2),
			BPF_ALU64_REG(BPF_SUB, R0, R3),
			BPF_ALU64_REG(BPF_SUB, R0, R4),
			BPF_ALU64_REG(BPF_SUB, R0, R5),
			BPF_ALU64_REG(BPF_SUB, R0, R6),
			BPF_ALU64_REG(BPF_SUB, R0, R7),
			BPF_ALU64_REG(BPF_SUB, R0, R8),
			BPF_ALU64_REG(BPF_SUB, R0, R9),
			BPF_ALU64_IMM(BPF_SUB, R0, 10),
			BPF_JMP_IMM(BPF_JEQ, R0, -55, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R1, R0),
			BPF_ALU64_REG(BPF_SUB, R1, R2),
			BPF_ALU64_REG(BPF_SUB, R1, R3),
			BPF_ALU64_REG(BPF_SUB, R1, R4),
			BPF_ALU64_REG(BPF_SUB, R1, R5),
			BPF_ALU64_REG(BPF_SUB, R1, R6),
			BPF_ALU64_REG(BPF_SUB, R1, R7),
			BPF_ALU64_REG(BPF_SUB, R1, R8),
			BPF_ALU64_REG(BPF_SUB, R1, R9),
			BPF_ALU64_IMM(BPF_SUB, R1, 10),
			BPF_ALU64_REG(BPF_SUB, R2, R0),
			BPF_ALU64_REG(BPF_SUB, R2, R1),
			BPF_ALU64_REG(BPF_SUB, R2, R3),
			BPF_ALU64_REG(BPF_SUB, R2, R4),
			BPF_ALU64_REG(BPF_SUB, R2, R5),
			BPF_ALU64_REG(BPF_SUB, R2, R6),
			BPF_ALU64_REG(BPF_SUB, R2, R7),
			BPF_ALU64_REG(BPF_SUB, R2, R8),
			BPF_ALU64_REG(BPF_SUB, R2, R9),
			BPF_ALU64_IMM(BPF_SUB, R2, 10),
			BPF_ALU64_REG(BPF_SUB, R3, R0),
			BPF_ALU64_REG(BPF_SUB, R3, R1),
			BPF_ALU64_REG(BPF_SUB, R3, R2),
			BPF_ALU64_REG(BPF_SUB, R3, R4),
			BPF_ALU64_REG(BPF_SUB, R3, R5),
			BPF_ALU64_REG(BPF_SUB, R3, R6),
			BPF_ALU64_REG(BPF_SUB, R3, R7),
			BPF_ALU64_REG(BPF_SUB, R3, R8),
			BPF_ALU64_REG(BPF_SUB, R3, R9),
			BPF_ALU64_IMM(BPF_SUB, R3, 10),
			BPF_ALU64_REG(BPF_SUB, R4, R0),
			BPF_ALU64_REG(BPF_SUB, R4, R1),
			BPF_ALU64_REG(BPF_SUB, R4, R2),
			BPF_ALU64_REG(BPF_SUB, R4, R3),
			BPF_ALU64_REG(BPF_SUB, R4, R5),
			BPF_ALU64_REG(BPF_SUB, R4, R6),
			BPF_ALU64_REG(BPF_SUB, R4, R7),
			BPF_ALU64_REG(BPF_SUB, R4, R8),
			BPF_ALU64_REG(BPF_SUB, R4, R9),
			BPF_ALU64_IMM(BPF_SUB, R4, 10),
			BPF_ALU64_REG(BPF_SUB, R5, R0),
			BPF_ALU64_REG(BPF_SUB, R5, R1),
			BPF_ALU64_REG(BPF_SUB, R5, R2),
			BPF_ALU64_REG(BPF_SUB, R5, R3),
			BPF_ALU64_REG(BPF_SUB, R5, R4),
			BPF_ALU64_REG(BPF_SUB, R5, R6),
			BPF_ALU64_REG(BPF_SUB, R5, R7),
			BPF_ALU64_REG(BPF_SUB, R5, R8),
			BPF_ALU64_REG(BPF_SUB, R5, R9),
			BPF_ALU64_IMM(BPF_SUB, R5, 10),
			BPF_ALU64_REG(BPF_SUB, R6, R0),
			BPF_ALU64_REG(BPF_SUB, R6, R1),
			BPF_ALU64_REG(BPF_SUB, R6, R2),
			BPF_ALU64_REG(BPF_SUB, R6, R3),
			BPF_ALU64_REG(BPF_SUB, R6, R4),
			BPF_ALU64_REG(BPF_SUB, R6, R5),
			BPF_ALU64_REG(BPF_SUB, R6, R7),
			BPF_ALU64_REG(BPF_SUB, R6, R8),
			BPF_ALU64_REG(BPF_SUB, R6, R9),
			BPF_ALU64_IMM(BPF_SUB, R6, 10),
			BPF_ALU64_REG(BPF_SUB, R7, R0),
			BPF_ALU64_REG(BPF_SUB, R7, R1),
			BPF_ALU64_REG(BPF_SUB, R7, R2),
			BPF_ALU64_REG(BPF_SUB, R7, R3),
			BPF_ALU64_REG(BPF_SUB, R7, R4),
			BPF_ALU64_REG(BPF_SUB, R7, R5),
			BPF_ALU64_REG(BPF_SUB, R7, R6),
			BPF_ALU64_REG(BPF_SUB, R7, R8),
			BPF_ALU64_REG(BPF_SUB, R7, R9),
			BPF_ALU64_IMM(BPF_SUB, R7, 10),
			BPF_ALU64_REG(BPF_SUB, R8, R0),
			BPF_ALU64_REG(BPF_SUB, R8, R1),
			BPF_ALU64_REG(BPF_SUB, R8, R2),
			BPF_ALU64_REG(BPF_SUB, R8, R3),
			BPF_ALU64_REG(BPF_SUB, R8, R4),
			BPF_ALU64_REG(BPF_SUB, R8, R5),
			BPF_ALU64_REG(BPF_SUB, R8, R6),
			BPF_ALU64_REG(BPF_SUB, R8, R7),
			BPF_ALU64_REG(BPF_SUB, R8, R9),
			BPF_ALU64_IMM(BPF_SUB, R8, 10),
			BPF_ALU64_REG(BPF_SUB, R9, R0),
			BPF_ALU64_REG(BPF_SUB, R9, R1),
			BPF_ALU64_REG(BPF_SUB, R9, R2),
			BPF_ALU64_REG(BPF_SUB, R9, R3),
			BPF_ALU64_REG(BPF_SUB, R9, R4),
			BPF_ALU64_REG(BPF_SUB, R9, R5),
			BPF_ALU64_REG(BPF_SUB, R9, R6),
			BPF_ALU64_REG(BPF_SUB, R9, R7),
			BPF_ALU64_REG(BPF_SUB, R9, R8),
			BPF_ALU64_IMM(BPF_SUB, R9, 10),
			BPF_ALU64_IMM(BPF_SUB, R0, 10),
			BPF_ALU64_IMM(BPF_NEG, R0, 0),
			BPF_ALU64_REG(BPF_SUB, R0, R1),
			BPF_ALU64_REG(BPF_SUB, R0, R2),
			BPF_ALU64_REG(BPF_SUB, R0, R3),
			BPF_ALU64_REG(BPF_SUB, R0, R4),
			BPF_ALU64_REG(BPF_SUB, R0, R5),
			BPF_ALU64_REG(BPF_SUB, R0, R6),
			BPF_ALU64_REG(BPF_SUB, R0, R7),
			BPF_ALU64_REG(BPF_SUB, R0, R8),
			BPF_ALU64_REG(BPF_SUB, R0, R9),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 11 } }
	},
	{	/* Mainly checking JIT here. */
		"INT: XOR",
		.u.insns_int = {
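			/* Both SUB r,r and XOR r,r must clear a register */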
			BPF_ALU64_REG(BPF_SUB, R0, R0),
			BPF_ALU64_REG(BPF_XOR, R1, R1),
			BPF_JMP_REG(BPF_JEQ, R0, R1, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_MOV, R0, 10),
			BPF_ALU64_IMM(BPF_MOV, R1, -1),
			BPF_ALU64_REG(BPF_SUB, R1, R1),
			BPF_ALU64_REG(BPF_XOR, R2, R2),
			BPF_JMP_REG(BPF_JEQ, R1, R2, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R2, R2),
			BPF_ALU64_REG(BPF_XOR, R3, R3),
			BPF_ALU64_IMM(BPF_MOV, R0, 10),
			BPF_ALU64_IMM(BPF_MOV, R1, -1),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R3, R3),
			BPF_ALU64_REG(BPF_XOR, R4, R4),
			BPF_ALU64_IMM(BPF_MOV, R2, 1),
			BPF_ALU64_IMM(BPF_MOV, R5, -1),
			BPF_JMP_REG(BPF_JEQ, R3, R4, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R4, R4),
			BPF_ALU64_REG(BPF_XOR, R5, R5),
			BPF_ALU64_IMM(BPF_MOV, R3, 1),
			BPF_ALU64_IMM(BPF_MOV, R7, -1),
			BPF_JMP_REG(BPF_JEQ, R5, R4, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_MOV, R5, 1),
			BPF_ALU64_REG(BPF_SUB, R5, R5),
			BPF_ALU64_REG(BPF_XOR, R6, R6),
			BPF_ALU64_IMM(BPF_MOV, R1, 1),
			BPF_ALU64_IMM(BPF_MOV, R8, -1),
			BPF_JMP_REG(BPF_JEQ, R5, R6, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R6, R6),
			BPF_ALU64_REG(BPF_XOR, R7, R7),
			BPF_JMP_REG(BPF_JEQ, R7, R6, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R7, R7),
			BPF_ALU64_REG(BPF_XOR, R8, R8),
			BPF_JMP_REG(BPF_JEQ, R7, R8, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R8, R8),
			BPF_ALU64_REG(BPF_XOR, R9, R9),
			BPF_JMP_REG(BPF_JEQ, R9, R8, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R9, R9),
			BPF_ALU64_REG(BPF_XOR, R0, R0),
			BPF_JMP_REG(BPF_JEQ, R9, R0, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_SUB, R1, R1),
			BPF_ALU64_REG(BPF_XOR, R0, R0),
			BPF_JMP_REG(BPF_JEQ, R9, R0, 2),
			BPF_ALU64_IMM(BPF_MOV, R0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } }
	},
	{	/* Mainly checking JIT here. */
		"INT: MUL",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, 11),
			BPF_ALU64_IMM(BPF_MOV, R1, 1),
			BPF_ALU64_IMM(BPF_MOV, R2, 2),
			BPF_ALU64_IMM(BPF_MOV, R3, 3),
			BPF_ALU64_IMM(BPF_MOV, R4, 4),
			BPF_ALU64_IMM(BPF_MOV, R5, 5),
			BPF_ALU64_IMM(BPF_MOV, R6, 6),
			BPF_ALU64_IMM(BPF_MOV, R7, 7),
			BPF_ALU64_IMM(BPF_MOV, R8, 8),
			BPF_ALU64_IMM(BPF_MOV, R9, 9),
			BPF_ALU64_REG(BPF_MUL, R0, R0),
			BPF_ALU64_REG(BPF_MUL, R0, R1),
			BPF_ALU64_REG(BPF_MUL, R0, R2),
			BPF_ALU64_REG(BPF_MUL, R0, R3),
			BPF_ALU64_REG(BPF_MUL, R0, R4),
			BPF_ALU64_REG(BPF_MUL, R0, R5),
			BPF_ALU64_REG(BPF_MUL, R0, R6),
			BPF_ALU64_REG(BPF_MUL, R0, R7),
			BPF_ALU64_REG(BPF_MUL, R0, R8),
			BPF_ALU64_REG(BPF_MUL, R0, R9),
			BPF_ALU64_IMM(BPF_MUL, R0, 10),
			BPF_JMP_IMM(BPF_JEQ, R0, 439084800, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_MUL, R1, R0),
			BPF_ALU64_REG(BPF_MUL, R1, R2),
			BPF_ALU64_REG(BPF_MUL, R1, R3),
			BPF_ALU64_REG(BPF_MUL, R1, R4),
			BPF_ALU64_REG(BPF_MUL, R1, R5),
			BPF_ALU64_REG(BPF_MUL, R1, R6),
			BPF_ALU64_REG(BPF_MUL, R1, R7),
			BPF_ALU64_REG(BPF_MUL, R1, R8),
			BPF_ALU64_REG(BPF_MUL, R1, R9),
			BPF_ALU64_IMM(BPF_MUL, R1, 10),
			BPF_ALU64_REG(BPF_MOV, R2, R1),
			BPF_ALU64_IMM(BPF_RSH, R2, 32),
			BPF_JMP_IMM(BPF_JEQ, R2, 0x5a924, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_LSH, R1, 32),
			BPF_ALU64_IMM(BPF_ARSH, R1, 32),
			BPF_JMP_IMM(BPF_JEQ, R1, 0xebb90000, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_REG(BPF_MUL, R2, R0),
			BPF_ALU64_REG(BPF_MUL, R2, R1),
			BPF_ALU64_REG(BPF_MUL, R2, R3),
			BPF_ALU64_REG(BPF_MUL, R2, R4),
			BPF_ALU64_REG(BPF_MUL, R2, R5),
			BPF_ALU64_REG(BPF_MUL, R2, R6),
			BPF_ALU64_REG(BPF_MUL, R2, R7),
			BPF_ALU64_REG(BPF_MUL, R2, R8),
			BPF_ALU64_REG(BPF_MUL, R2, R9),
			BPF_ALU64_IMM(BPF_MUL, R2, 10),
			BPF_ALU64_IMM(BPF_RSH, R2, 32),
			BPF_ALU64_REG(BPF_MOV, R0, R2),
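			/* R0 = upper 32 bits of the final product */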
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x35d97ef2 } }
	},
	{	/* Mainly checking JIT here. */
		"MOV REG64",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
			BPF_MOV64_REG(R1, R0),
			BPF_MOV64_REG(R2, R1),
			BPF_MOV64_REG(R3, R2),
			BPF_MOV64_REG(R4, R3),
			BPF_MOV64_REG(R5, R4),
			BPF_MOV64_REG(R6, R5),
			BPF_MOV64_REG(R7, R6),
			BPF_MOV64_REG(R8, R7),
			BPF_MOV64_REG(R9, R8),
			BPF_ALU64_IMM(BPF_MOV, R0, 0),
			BPF_ALU64_IMM(BPF_MOV, R1, 0),
			BPF_ALU64_IMM(BPF_MOV, R2, 0),
			BPF_ALU64_IMM(BPF_MOV, R3, 0),
			BPF_ALU64_IMM(BPF_MOV, R4, 0),
			BPF_ALU64_IMM(BPF_MOV, R5, 0),
			BPF_ALU64_IMM(BPF_MOV, R6, 0),
			BPF_ALU64_IMM(BPF_MOV, R7, 0),
			BPF_ALU64_IMM(BPF_MOV, R8, 0),
			BPF_ALU64_IMM(BPF_MOV, R9, 0),
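			/* All ten registers were cleared, so the sum below
			 * stays zero and only the final 0xfefe remains.
			 */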
			BPF_ALU64_REG(BPF_ADD, R0, R0),
			BPF_ALU64_REG(BPF_ADD, R0, R1),
			BPF_ALU64_REG(BPF_ADD, R0, R2),
			BPF_ALU64_REG(BPF_ADD, R0, R3),
			BPF_ALU64_REG(BPF_ADD, R0, R4),
			BPF_ALU64_REG(BPF_ADD, R0, R5),
			BPF_ALU64_REG(BPF_ADD, R0, R6),
			BPF_ALU64_REG(BPF_ADD, R0, R7),
			BPF_ALU64_REG(BPF_ADD, R0, R8),
			BPF_ALU64_REG(BPF_ADD, R0, R9),
			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xfefe } }
	},
	{	/* Mainly checking JIT here. */
		"MOV REG32",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
			BPF_MOV64_REG(R1, R0),
			BPF_MOV64_REG(R2, R1),
			BPF_MOV64_REG(R3, R2),
			BPF_MOV64_REG(R4, R3),
			BPF_MOV64_REG(R5, R4),
			BPF_MOV64_REG(R6, R5),
			BPF_MOV64_REG(R7, R6),
			BPF_MOV64_REG(R8, R7),
			BPF_MOV64_REG(R9, R8),
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_ALU32_IMM(BPF_MOV, R1, 0),
			BPF_ALU32_IMM(BPF_MOV, R2, 0),
			BPF_ALU32_IMM(BPF_MOV, R3, 0),
			BPF_ALU32_IMM(BPF_MOV, R4, 0),
			BPF_ALU32_IMM(BPF_MOV, R5, 0),
			BPF_ALU32_IMM(BPF_MOV, R6, 0),
			BPF_ALU32_IMM(BPF_MOV, R7, 0),
			BPF_ALU32_IMM(BPF_MOV, R8, 0),
			BPF_ALU32_IMM(BPF_MOV, R9, 0),
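			/* 32-bit MOV zero-extends, clearing the 0xffffffff
			 * upper halves set up by the MOV64 chain above.
			 */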
			BPF_ALU64_REG(BPF_ADD, R0, R0),
			BPF_ALU64_REG(BPF_ADD, R0, R1),
			BPF_ALU64_REG(BPF_ADD, R0, R2),
			BPF_ALU64_REG(BPF_ADD, R0, R3),
			BPF_ALU64_REG(BPF_ADD, R0, R4),
			BPF_ALU64_REG(BPF_ADD, R0, R5),
			BPF_ALU64_REG(BPF_ADD, R0, R6),
			BPF_ALU64_REG(BPF_ADD, R0, R7),
			BPF_ALU64_REG(BPF_ADD, R0, R8),
			BPF_ALU64_REG(BPF_ADD, R0, R9),
			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xfefe } }
	},
	{	/* Mainly checking JIT here. */
		"LD IMM64",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 0xffffffffffffffffLL),
			BPF_MOV64_REG(R1, R0),
			BPF_MOV64_REG(R2, R1),
			BPF_MOV64_REG(R3, R2),
			BPF_MOV64_REG(R4, R3),
			BPF_MOV64_REG(R5, R4),
			BPF_MOV64_REG(R6, R5),
			BPF_MOV64_REG(R7, R6),
			BPF_MOV64_REG(R8, R7),
			BPF_MOV64_REG(R9, R8),
			BPF_LD_IMM64(R0, 0x0LL),
			BPF_LD_IMM64(R1, 0x0LL),
			BPF_LD_IMM64(R2, 0x0LL),
			BPF_LD_IMM64(R3, 0x0LL),
			BPF_LD_IMM64(R4, 0x0LL),
			BPF_LD_IMM64(R5, 0x0LL),
			BPF_LD_IMM64(R6, 0x0LL),
			BPF_LD_IMM64(R7, 0x0LL),
			BPF_LD_IMM64(R8, 0x0LL),
			BPF_LD_IMM64(R9, 0x0LL),
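			/* BPF_LD_IMM64 rewrites all 64 bits, so every register
			 * is zero again before the additions below.
			 */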
			BPF_ALU64_REG(BPF_ADD, R0, R0),
			BPF_ALU64_REG(BPF_ADD, R0, R1),
			BPF_ALU64_REG(BPF_ADD, R0, R2),
			BPF_ALU64_REG(BPF_ADD, R0, R3),
			BPF_ALU64_REG(BPF_ADD, R0, R4),
			BPF_ALU64_REG(BPF_ADD, R0, R5),
			BPF_ALU64_REG(BPF_ADD, R0, R6),
			BPF_ALU64_REG(BPF_ADD, R0, R7),
			BPF_ALU64_REG(BPF_ADD, R0, R8),
			BPF_ALU64_REG(BPF_ADD, R0, R9),
			BPF_ALU64_IMM(BPF_ADD, R0, 0xfefe),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xfefe } }
	},
	{
		"INT: ALU MIX",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, 11),
			BPF_ALU64_IMM(BPF_ADD, R0, -1),
			BPF_ALU64_IMM(BPF_MOV, R2, 2),
			BPF_ALU64_IMM(BPF_XOR, R2, 3),
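			/* R0 = 11 - 1 = 10, R2 = 2 ^ 3 = 1; then 10 / 1 = 10
			 * and 10 % 3 = 1.
			 */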
			BPF_ALU64_REG(BPF_DIV, R0, R2),
			BPF_JMP_IMM(BPF_JEQ, R0, 10, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_MOD, R0, 3),
			BPF_JMP_IMM(BPF_JEQ, R0, 1, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_MOV, R0, -1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -1 } }
	},
	{
		"INT: shifts by register",
		.u.insns_int = {
			BPF_MOV64_IMM(R0, -1234),
			BPF_MOV64_IMM(R1, 1),
			BPF_ALU32_REG(BPF_RSH, R0, R1),
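			/* 32-bit op: 0xfffffb2e >> 1 = 0x7ffffd97, upper word cleared */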
			BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(R2, 1),
			BPF_ALU64_REG(BPF_LSH, R0, R2),
			BPF_MOV32_IMM(R4, -1234),
			BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
			BPF_EXIT_INSN(),
			BPF_ALU64_IMM(BPF_AND, R4, 63),
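			/* R4 = -1234 & 63 = 46 */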
			BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <<= 46 */
			BPF_MOV64_IMM(R3, 47),
			BPF_ALU64_REG(BPF_ARSH, R0, R3),
			BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(R2, 1),
			BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
			BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(R4, 4),
			BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
			BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(R4, 5),
			BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
			BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
			BPF_EXIT_INSN(),
			BPF_MOV64_IMM(R0, -1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -1 } }
	},
#ifdef CONFIG_32BIT
	{
		"INT: 32-bit context pointer word order and zero-extension",
		.u.insns_int = {
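			/* The skb pointer must sit in the low word of R1
			 * with the high word zeroed.
			 */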
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3),
			BPF_ALU64_IMM(BPF_RSH, R1, 32),
			BPF_JMP32_IMM(BPF_JNE, R1, 0, 1),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } }
	},
#endif
	{
		"check: missing ret",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_IMM, 1),
		},
		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
		{ },
		{ },
		.fill_helper = NULL,
		.expected_errcode = -EINVAL,
	},
	{
		"check: div_k_0",
		.u.insns = {
			BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
			BPF_STMT(BPF_RET | BPF_K, 0)
		},
		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
		{ },
		{ },
		.fill_helper = NULL,
		.expected_errcode = -EINVAL,
	},
	{
		"check: unknown insn",
		.u.insns = {
			/* seccomp insn, rejected in socket filter */
			BPF_STMT(BPF_LDX | BPF_W | BPF_ABS, 0),
			BPF_STMT(BPF_RET | BPF_K, 0)
		},
		CLASSIC | FLAG_EXPECTED_FAIL,
		{ },
		{ },
		.fill_helper = NULL,
		.expected_errcode = -EINVAL,
	},
	{
		"check: out of range spill/fill",
		.u.insns = {
			BPF_STMT(BPF_STX, 16),
			BPF_STMT(BPF_RET | BPF_K, 0)
		},
		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
		{ },
		{ },
		.fill_helper = NULL,
		.expected_errcode = -EINVAL,
	},
	{
		"JUMPS + HOLES",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 15),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 3, 4),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90c2894d, 1, 2),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 2, 3),
			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x2ac28349, 1, 2),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 14, 15),
			BPF_JUMP(BPF_JMP | BPF_JGE, 0, 13, 14),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 2, 3),
			BPF_JUMP(BPF_JMP | BPF_JEQ, 0x90d2ff41, 1, 2),
			BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 0),
			BPF_STMT(BPF_RET | BPF_A, 0),
			BPF_STMT(BPF_RET | BPF_A, 0),
		},
		CLASSIC,
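		/* 88 bytes of test data: Ethernet + IPv4 + UDP headers
		 * followed by 0xcc padding.
		 */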
		{ 0x00, 0x1b, 0x21, 0x3c, 0x9d, 0xf8,
		  0x90, 0xe2, 0xba, 0x0a, 0x56, 0xb4,
		  0x08, 0x00,
		  0x45, 0x00, 0x00, 0x28, 0x00, 0x00,
		  0x20, 0x00, 0x40, 0x11, 0x00, 0x00, /* IP header */
		  0xc0, 0xa8, 0x33, 0x01,
		  0xc0, 0xa8, 0x33, 0x02,
		  0xbb, 0xb6,
		  0xa9, 0xfa,
		  0x00, 0x14, 0x00, 0x00,
		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
		  0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc,
		  0xcc, 0xcc, 0xcc, 0xcc },
		{ { 88, 0x001b } }
	},
	{
		"check: RET X",
		.u.insns = {
			BPF_STMT(BPF_RET | BPF_X, 0),
		},
		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
		{ },
		{ },
		.fill_helper = NULL,
		.expected_errcode = -EINVAL,
	},
	{
		"check: LDX + RET X",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_IMM, 42),
			BPF_STMT(BPF_RET | BPF_X, 0),
		},
		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
		{ },
		{ },
		.fill_helper = NULL,
		.expected_errcode = -EINVAL,
	},
	{	/* Mainly checking JIT here. */
		"M[]: alt STX + LDX",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_IMM, 100),
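			/* X starts at 100; each of the 16 scratch slots adds
			 * one, so A returns 116.
			 */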
			BPF_STMT(BPF_STX, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 0),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 1),
			BPF_STMT(BPF_LDX | BPF_MEM, 1),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 2),
			BPF_STMT(BPF_LDX | BPF_MEM, 2),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 3),
			BPF_STMT(BPF_LDX | BPF_MEM, 3),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 4),
			BPF_STMT(BPF_LDX | BPF_MEM, 4),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 5),
			BPF_STMT(BPF_LDX | BPF_MEM, 5),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 6),
			BPF_STMT(BPF_LDX | BPF_MEM, 6),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 7),
			BPF_STMT(BPF_LDX | BPF_MEM, 7),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 8),
			BPF_STMT(BPF_LDX | BPF_MEM, 8),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 9),
			BPF_STMT(BPF_LDX | BPF_MEM, 9),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 10),
			BPF_STMT(BPF_LDX | BPF_MEM, 10),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 11),
			BPF_STMT(BPF_LDX | BPF_MEM, 11),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 12),
			BPF_STMT(BPF_LDX | BPF_MEM, 12),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 13),
			BPF_STMT(BPF_LDX | BPF_MEM, 13),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 14),
			BPF_STMT(BPF_LDX | BPF_MEM, 14),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_STX, 15),
			BPF_STMT(BPF_LDX | BPF_MEM, 15),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, 1),
			BPF_STMT(BPF_MISC | BPF_TAX, 0),
			BPF_STMT(BPF_RET | BPF_A, 0),
		},
		CLASSIC | FLAG_NO_DATA,
		{ },
		{ { 0, 116 } },
	},
	{	/* Mainly checking JIT here. */
		"M[]: full STX + full LDX",
		.u.insns = {
			BPF_STMT(BPF_LDX | BPF_IMM, 0xbadfeedb),
			BPF_STMT(BPF_STX, 0),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xecabedae),
			BPF_STMT(BPF_STX, 1),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xafccfeaf),
			BPF_STMT(BPF_STX, 2),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xbffdcedc),
			BPF_STMT(BPF_STX, 3),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xfbbbdccb),
			BPF_STMT(BPF_STX, 4),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xfbabcbda),
			BPF_STMT(BPF_STX, 5),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xaedecbdb),
			BPF_STMT(BPF_STX, 6),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xadebbade),
			BPF_STMT(BPF_STX, 7),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xfcfcfaec),
			BPF_STMT(BPF_STX, 8),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xbcdddbdc),
			BPF_STMT(BPF_STX, 9),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xfeefdfac),
			BPF_STMT(BPF_STX, 10),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xcddcdeea),
			BPF_STMT(BPF_STX, 11),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xaccfaebb),
			BPF_STMT(BPF_STX, 12),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xbdcccdcf),
			BPF_STMT(BPF_STX, 13),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xaaedecde),
			BPF_STMT(BPF_STX, 14),
			BPF_STMT(BPF_LDX | BPF_IMM, 0xfaeacdad),
			BPF_STMT(BPF_STX, 15),
			BPF_STMT(BPF_LDX | BPF_MEM, 0),
			BPF_STMT(BPF_MISC | BPF_TXA, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 1),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 2),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 3),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 4),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 5),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 6),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 7),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 8),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 9),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 10),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 11),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 12),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 13),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 14),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
			BPF_STMT(BPF_LDX | BPF_MEM, 15),
			BPF_STMT(BPF_ALU | BPF_ADD | BPF_X, 0),
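			/* A now holds the 32-bit sum of the 16 stored words:
			 * 0x02a5a5e5.
			 */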
			BPF_STMT(BPF_RET | BPF_A, 0),
		},
		CLASSIC | FLAG_NO_DATA,
		{ },
		{ { 0, 0x2a5a5e5 } },
	},
	{
		"check: SKF_AD_MAX",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF + SKF_AD_MAX),
			BPF_STMT(BPF_RET | BPF_A, 0),
		},
		CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
		{ },
		{ },
		.fill_helper = NULL,
		.expected_errcode = -EINVAL,
	},
	{	/* Passes checker but fails during runtime. */
		"LD [SKF_AD_OFF-1]",
		.u.insns = {
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 SKF_AD_OFF - 1),
			BPF_STMT(BPF_RET | BPF_K, 1),
		},
		CLASSIC,
		{ },
		{ { 1, 0 } },
	},
	{
		"load 64-bit immediate",
		.u.insns_int = {
			BPF_LD_IMM64(R1, 0x567800001234LL),
			BPF_MOV64_REG(R2, R1),
			BPF_MOV64_REG(R3, R2),
			BPF_ALU64_IMM(BPF_RSH, R2, 32),
			BPF_ALU64_IMM(BPF_LSH, R3, 32),
			BPF_ALU64_IMM(BPF_RSH, R3, 32),
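			/* R2 = upper 32 bits (0x5678), R3 = lower 32 bits (0x1234) */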
			BPF_ALU64_IMM(BPF_MOV, R0, 0),
			BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
			BPF_EXIT_INSN(),
			BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
			BPF_EXIT_INSN(),
			BPF_LD_IMM64(R0, 0x1ffffffffLL),
			BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } }
	},
	/* BPF_ALU | BPF_MOV | BPF_X */
	{
		"ALU_MOV_X: dst = 2",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R1, 2),
			BPF_ALU32_REG(BPF_MOV, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU_MOV_X: dst = 4294967295",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
			BPF_ALU32_REG(BPF_MOV, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 4294967295U } },
	},
	{
		"ALU64_MOV_X: dst = 2",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R1, 2),
			BPF_ALU64_REG(BPF_MOV, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU64_MOV_X: dst = 4294967295",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U),
			BPF_ALU64_REG(BPF_MOV, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 4294967295U } },
	},
	/* BPF_ALU | BPF_MOV | BPF_K */
	{
		"ALU_MOV_K: dst = 2",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, 2),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU_MOV_K: dst = 4294967295",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 4294967295U } },
	},
	{
		"ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff",
		.u.insns_int = {
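			/* R2 starts with stale upper bits; the 32-bit MOV must
			 * leave exactly 0x00000000ffffffff, as held in R3.
			 */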
			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
			BPF_LD_IMM64(R3, 0x00000000ffffffffLL),
			BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU_MOV_K: small negative",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, -123),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -123 } }
	},
	{
		"ALU_MOV_K: small negative zero extension",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, -123),
			BPF_ALU64_IMM(BPF_RSH, R0, 32),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0 } }
	},
	{
		"ALU_MOV_K: large negative",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -123456789 } }
	},
	{
		"ALU_MOV_K: large negative zero extension",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
			BPF_ALU64_IMM(BPF_RSH, R0, 32),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0 } }
	},
	{
		"ALU64_MOV_K: dst = 2",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, 2),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU64_MOV_K: dst = 2147483647",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, 2147483647),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2147483647 } },
	},
	{
		"ALU64_OR_K: dst = 0x0",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
			BPF_LD_IMM64(R3, 0x0),
			BPF_ALU64_IMM(BPF_MOV, R2, 0x0),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU64_MOV_K: dst = -1",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
			BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU64_MOV_K: small negative",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, -123),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -123 } }
	},
	{
		"ALU64_MOV_K: small negative sign extension",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, -123),
			BPF_ALU64_IMM(BPF_RSH, R0, 32),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xffffffff } }
	},
	{
		"ALU64_MOV_K: large negative",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -123456789 } }
	},
	{
		"ALU64_MOV_K: large negative sign extension",
		.u.insns_int = {
			BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
			BPF_ALU64_IMM(BPF_RSH, R0, 32),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xffffffff } }
	},
	/* BPF_ALU | BPF_ADD | BPF_X */
	{
		"ALU_ADD_X: 1 + 2 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU32_IMM(BPF_MOV, R1, 2),
			BPF_ALU32_REG(BPF_ADD, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU_ADD_X: 1 + 4294967294 = 4294967295",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
			BPF_ALU32_REG(BPF_ADD, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 4294967295U } },
	},
	{
		"ALU_ADD_X: 2 + 4294967294 = 0",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2),
			BPF_LD_IMM64(R1, 4294967294U),
			BPF_ALU32_REG(BPF_ADD, R0, R1),
			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	{
		"ALU64_ADD_X: 1 + 2 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU32_IMM(BPF_MOV, R1, 2),
			BPF_ALU64_REG(BPF_ADD, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU64_ADD_X: 1 + 4294967294 = 4294967295",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
			BPF_ALU64_REG(BPF_ADD, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 4294967295U } },
	},
	{
		"ALU64_ADD_X: 2 + 4294967294 = 4294967296",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2),
			BPF_LD_IMM64(R1, 4294967294U),
			BPF_LD_IMM64(R2, 4294967296ULL),
			BPF_ALU64_REG(BPF_ADD, R0, R1),
			BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
			BPF_MOV32_IMM(R0, 0),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	/* BPF_ALU | BPF_ADD | BPF_K */
	{
		"ALU_ADD_K: 1 + 2 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU32_IMM(BPF_ADD, R0, 2),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU_ADD_K: 3 + 0 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU32_IMM(BPF_ADD, R0, 0),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU_ADD_K: 1 + 4294967294 = 4294967295",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 4294967295U } },
	},
	{
		"ALU_ADD_K: 4294967294 + 2 = 0",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 4294967294U),
			BPF_ALU32_IMM(BPF_ADD, R0, 2),
			BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	{
		"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0x00000000ffffffff),
			BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU_ADD_K: 0 + 0xffff = 0xffff",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0xffff),
			BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0x7fffffff),
			BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0x80000000),
			BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0x80008000),
			BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU64_ADD_K: 1 + 2 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU64_IMM(BPF_ADD, R0, 2),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU64_ADD_K: 3 + 0 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU64_IMM(BPF_ADD, R0, 0),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU64_ADD_K: 1 + 2147483646 = 2147483647",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU64_IMM(BPF_ADD, R0, 2147483646),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2147483647 } },
	},
	{
		"ALU64_ADD_K: 4294967294 + 2 = 4294967296",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 4294967294U),
			BPF_LD_IMM64(R1, 4294967296ULL),
			BPF_ALU64_IMM(BPF_ADD, R0, 2),
			BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
			BPF_ALU32_IMM(BPF_MOV, R0, 0),
			BPF_EXIT_INSN(),
			BPF_ALU32_IMM(BPF_MOV, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	{
		"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2147483646),
			BPF_ALU64_IMM(BPF_ADD, R0, -2147483647),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -1 } },
	},
	{
		"ALU64_ADD_K: 1 + 0 = 1",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x1),
			BPF_LD_IMM64(R3, 0x1),
			BPF_ALU64_IMM(BPF_ADD, R2, 0x0),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0xffffffffffffffffLL),
			BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU64_ADD_K: 0 + 0xffff = 0xffff",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0xffff),
			BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0x7fffffff),
			BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0xffffffff80000000LL),
			BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	{
		"ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x0),
			BPF_LD_IMM64(R3, 0xffffffff80008000LL),
			BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x1 } },
	},
	/* BPF_ALU | BPF_SUB | BPF_X */
	{
		"ALU_SUB_X: 3 - 1 = 2",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU32_IMM(BPF_MOV, R1, 1),
			BPF_ALU32_REG(BPF_SUB, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU_SUB_X: 4294967295 - 4294967294 = 1",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 4294967295U),
			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
			BPF_ALU32_REG(BPF_SUB, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	{
		"ALU64_SUB_X: 3 - 1 = 2",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU32_IMM(BPF_MOV, R1, 1),
			BPF_ALU64_REG(BPF_SUB, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU64_SUB_X: 4294967295 - 4294967294 = 1",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 4294967295U),
			BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U),
			BPF_ALU64_REG(BPF_SUB, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	/* BPF_ALU | BPF_SUB | BPF_K */
	{
		"ALU_SUB_K: 3 - 1 = 2",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU32_IMM(BPF_SUB, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU_SUB_K: 3 - 0 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU32_IMM(BPF_SUB, R0, 0),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU_SUB_K: 4294967295 - 4294967294 = 1",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 4294967295U),
			BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	{
		"ALU64_SUB_K: 3 - 1 = 2",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU64_IMM(BPF_SUB, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2 } },
	},
	{
		"ALU64_SUB_K: 3 - 0 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU64_IMM(BPF_SUB, R0, 0),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU64_SUB_K: 4294967294 - 4294967295 = -1",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 4294967294U),
			BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -1 } },
	},
	{
		"ALU64_ADD_K: 2147483646 - 2147483647 = -1",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2147483646),
			BPF_ALU64_IMM(BPF_SUB, R0, 2147483647),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, -1 } },
	},
	/* BPF_ALU | BPF_MUL | BPF_X */
	{
		"ALU_MUL_X: 2 * 3 = 6",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2),
			BPF_ALU32_IMM(BPF_MOV, R1, 3),
			BPF_ALU32_REG(BPF_MUL, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 6 } },
	},
	{
		"ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2),
			BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8),
			BPF_ALU32_REG(BPF_MUL, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xFFFFFFF0 } },
	},
	{
		"ALU_MUL_X: -1 * -1 = 1",
		.u.insns_int = {
			BPF_LD_IMM64(R0, -1),
			BPF_ALU32_IMM(BPF_MOV, R1, -1),
			BPF_ALU32_REG(BPF_MUL, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 1 } },
	},
	{
		"ALU64_MUL_X: 2 * 3 = 6",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2),
			BPF_ALU32_IMM(BPF_MOV, R1, 3),
			BPF_ALU64_REG(BPF_MUL, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 6 } },
	},
	{
		"ALU64_MUL_X: 1 * 2147483647 = 2147483647",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 1),
			BPF_ALU32_IMM(BPF_MOV, R1, 2147483647),
			BPF_ALU64_REG(BPF_MUL, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 2147483647 } },
	},
	{
		"ALU64_MUL_X: 64x64 multiply, low word",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
			BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
			BPF_ALU64_REG(BPF_MUL, R0, R1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xe5618cf0 } }
	},
	{
		"ALU64_MUL_X: 64x64 multiply, high word",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
			BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
			BPF_ALU64_REG(BPF_MUL, R0, R1),
			BPF_ALU64_IMM(BPF_RSH, R0, 32),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0x2236d88f } }
	},
	/* BPF_ALU | BPF_MUL | BPF_K */
	{
		"ALU_MUL_K: 2 * 3 = 6",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2),
			BPF_ALU32_IMM(BPF_MUL, R0, 3),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 6 } },
	},
	{
		"ALU_MUL_K: 3 * 1 = 3",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 3),
			BPF_ALU32_IMM(BPF_MUL, R0, 1),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 3 } },
	},
	{
		"ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0",
		.u.insns_int = {
			BPF_LD_IMM64(R0, 2),
			BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xFFFFFFF0 } },
	},
	{
		"ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff",
		.u.insns_int = {
			BPF_LD_IMM64(R2, 0x1),
			BPF_LD_IMM64(R3, 0x00000000ffffffff),
			BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff),
			BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
			BPF_MOV32_IMM(R0, 2),
			BPF_EXIT_INSN(),
			BPF_MOV32_IMM(R0,