#include <linux/atomic.h>
#include <linux/bpf_verifier.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>
#include <net/xdp.h>
#include <net/mptcp.h>
#include <net/netfilter/nf_conntrack_bpf.h>
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);
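/* Copy a classic BPF filter program descriptor from user space. For
 * compat (32-bit) syscalls the user hands in a struct compat_sock_fprog,
 * which is converted into the native struct sock_fprog layout here.
 */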
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
if (in_compat_syscall()) {
struct compat_sock_fprog f32;
if (len != sizeof(f32))
return -EINVAL;
if (copy_from_sockptr(&f32, src, sizeof(f32)))
return -EFAULT;
memset(dst, 0, sizeof(*dst));
dst->len = f32.len;
dst->filter = compat_ptr(f32.filter);
} else {
if (len != sizeof(*dst))
return -EINVAL;
if (copy_from_sockptr(dst, src, sizeof(*dst)))
return -EFAULT;
}
return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
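/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to bpf_prog_run. It returns 0 if the packet should be accepted
 * or -EPERM if the packet should be tossed.
 */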
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
int err;
struct sk_filter *filter;
if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
return -ENOMEM;
}
err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
if (err)
return err;
err = security_sock_rcv_skb(sk, skb);
if (err)
return err;
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
if (filter) {
struct sock *save_sk = skb->sk;
unsigned int pkt_len;
skb->sk = sk;
pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
skb->sk = save_sk;
err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
}
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
return skb_get_poff(skb);
}
BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (skb->len < sizeof(struct nlattr))
return 0;
if (a > skb->len - sizeof(struct nlattr))
return 0;
nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
struct nlattr *nla;
if (skb_is_nonlinear(skb))
return 0;
if (skb->len < sizeof(struct nlattr))
return 0;
if (a > skb->len - sizeof(struct nlattr))
return 0;
nla = (struct nlattr *) &skb->data[a];
if (nla->nla_len > skb->len - a)
return 0;
nla = nla_find_nested(nla, x);
if (nla)
return (void *) nla - (void *) skb->data;
return 0;
}
BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
u8 tmp, *ptr;
const int len = sizeof(tmp);
if (offset >= 0) {
if (headlen - offset >= len)
return *(u8 *)(data + offset);
if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
return tmp;
} else {
ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
if (likely(ptr))
return *(u8 *)ptr;
}
return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
int, offset)
{
return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
offset);
}
BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
__be16 tmp, *ptr;
const int len = sizeof(tmp);
if (offset >= 0) {
if (headlen - offset >= len)
return get_unaligned_be16(data + offset);
if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
return be16_to_cpu(tmp);
} else {
ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
if (likely(ptr))
return get_unaligned_be16(ptr);
}
return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
int, offset)
{
return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
offset);
}
BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
data, int, headlen, int, offset)
{
__be32 tmp, *ptr;
const int len = sizeof(tmp);
if (likely(offset >= 0)) {
if (headlen - offset >= len)
return get_unaligned_be32(data + offset);
if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
return be32_to_cpu(tmp);
} else {
ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
if (likely(ptr))
return get_unaligned_be32(ptr);
}
return -EFAULT;
}
BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
int, offset)
{
return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
offset);
}
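/* Emit eBPF instructions that load the sk_buff field backing the given
 * classic BPF ancillary access (SKF_AD_*) into @dst_reg, with @src_reg
 * holding the skb pointer. Returns the number of instructions emitted.
 */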
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
struct bpf_insn *insn_buf)
{
struct bpf_insn *insn = insn_buf;
switch (skb_field) {
case SKF_AD_MARK:
BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
offsetof(struct sk_buff, mark));
break;
case SKF_AD_PKTTYPE:
*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET);
*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
break;
case SKF_AD_QUEUE:
BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
offsetof(struct sk_buff, queue_mapping));
break;
case SKF_AD_VLAN_TAG:
BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
offsetof(struct sk_buff, vlan_tci));
break;
case SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_all) != 4);
*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
offsetof(struct sk_buff, vlan_all));
*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
*insn++ = BPF_ALU32_IMM(BPF_MOV, dst_reg, 1);
break;
}
return insn - insn_buf;
}
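/* Translate classic BPF ancillary loads (BPF_LD | BPF_ABS with k >=
 * SKF_AD_OFF) into native eBPF field accesses or helper calls. Returns
 * false when the requested extension is not recognized.
 */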
static bool convert_bpf_extensions(struct sock_filter *fp,
struct bpf_insn **insnp)
{
struct bpf_insn *insn = *insnp;
u32 cnt;
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PROTOCOL:
BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);
*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
offsetof(struct sk_buff, protocol));
*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
break;
case SKF_AD_OFF + SKF_AD_PKTTYPE:
cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_IFINDEX:
case SKF_AD_OFF + SKF_AD_HATYPE:
BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
BPF_REG_TMP, BPF_REG_CTX,
offsetof(struct sk_buff, dev));
*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
*insn++ = BPF_EXIT_INSN();
if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
offsetof(struct net_device, ifindex));
else
*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
offsetof(struct net_device, type));
break;
case SKF_AD_OFF + SKF_AD_MARK:
cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_RXHASH:
BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
offsetof(struct sk_buff, hash));
break;
case SKF_AD_OFF + SKF_AD_QUEUE:
cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_VLAN_TAG:
cnt = convert_skb_access(SKF_AD_VLAN_TAG,
BPF_REG_A, BPF_REG_CTX, insn);
insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
BPF_REG_A, BPF_REG_CTX, insn);
insn += cnt - 1;
break;
case SKF_AD_OFF + SKF_AD_VLAN_TPID:
BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);
*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
offsetof(struct sk_buff, vlan_proto));
*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
break;
case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
case SKF_AD_OFF + SKF_AD_NLATTR:
case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
case SKF_AD_OFF + SKF_AD_CPU:
case SKF_AD_OFF + SKF_AD_RANDOM:
*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
switch (fp->k) {
case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
break;
case SKF_AD_OFF + SKF_AD_NLATTR:
*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
break;
case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
break;
case SKF_AD_OFF + SKF_AD_CPU:
*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
break;
case SKF_AD_OFF + SKF_AD_RANDOM:
*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
bpf_user_rnd_init_once();
break;
}
break;
case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
break;
default:
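/* Dummy check to keep the compiler from eliding __bpf_call_base() as an
 * optimization; the call always returns 0 here.
 */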
BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
return false;
}
*insnp = insn;
return true;
}
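/* Convert classic BPF_LD | BPF_ABS / BPF_IND packet loads. When the
 * offset is a non-negative constant, a fast path loads directly from
 * skb->data (cached in BPF_REG_D) as long as the access stays within the
 * linear headlen cached in BPF_REG_H; otherwise the generated code calls
 * bpf_skb_load_helper_{8,16,32}() and exits the program with return
 * value 0 if the load fails.
 */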
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
bool endian = BPF_SIZE(fp->code) == BPF_H ||
BPF_SIZE(fp->code) == BPF_W;
bool indirect = BPF_MODE(fp->code) == BPF_IND;
const int ip_align = NET_IP_ALIGN;
struct bpf_insn *insn = *insnp;
int offset = fp->k;
if (!indirect &&
((unaligned_ok && offset >= 0) ||
(!unaligned_ok && offset >= 0 &&
offset + ip_align >= 0 &&
offset + ip_align % size == 0))) {
bool ldx_off_ok = offset <= S16_MAX;
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
if (offset)
*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
size, 2 + endian + (!ldx_off_ok * 2));
if (ldx_off_ok) {
*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
BPF_REG_D, offset);
} else {
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
BPF_REG_TMP, 0);
}
if (endian)
*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
*insn++ = BPF_JMP_A(8);
}
*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
if (!indirect) {
*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
} else {
*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
if (fp->k)
*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
}
switch (BPF_SIZE(fp->code)) {
case BPF_B:
*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
break;
case BPF_H:
*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
break;
case BPF_W:
*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
break;
default:
return false;
}
*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
*insn = BPF_EXIT_INSN();
*insnp = insn;
return true;
}
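/* bpf_convert_filter - convert a classic BPF filter into eBPF.
 *
 * Called twice: first with @new_prog == NULL to compute the converted
 * length in *@new_len, then again to emit the instructions into
 * @new_prog. The emitted prologue clears A and X and, when LD_ABS/IND is
 * used, caches skb->data and the linear headlen in callee-saved
 * registers so the converted packet loads can use them directly.
 */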
static int bpf_convert_filter(struct sock_filter *prog, int len,
struct bpf_prog *new_prog, int *new_len,
bool *seen_ld_abs)
{
int new_flen = 0, pass = 0, target, i, stack_off;
struct bpf_insn *new_insn, *first_insn = NULL;
struct sock_filter *fp;
int *addrs = NULL;
u8 bpf_src;
BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
if (len <= 0 || len > BPF_MAXINSNS)
return -EINVAL;
if (new_prog) {
first_insn = new_prog->insnsi;
addrs = kcalloc(len, sizeof(*addrs),
GFP_KERNEL | __GFP_NOWARN);
if (!addrs)
return -ENOMEM;
}
do_pass:
new_insn = first_insn;
fp = prog;
if (new_prog) {
*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);
*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
if (*seen_ld_abs) {
*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
BPF_REG_D, BPF_REG_CTX,
offsetof(struct sk_buff, data));
*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
offsetof(struct sk_buff, len));
*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
offsetof(struct sk_buff, data_len));
*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
}
} else {
new_insn += 3;
}
for (i = 0; i < len; fp++, i++) {
struct bpf_insn tmp_insns[32] = { };
struct bpf_insn *insn = tmp_insns;
if (addrs)
addrs[i] = new_insn - first_insn;
switch (fp->code) {
case BPF_ALU | BPF_ADD | BPF_X:
case BPF_ALU | BPF_ADD | BPF_K:
case BPF_ALU | BPF_SUB | BPF_X:
case BPF_ALU | BPF_SUB | BPF_K:
case BPF_ALU | BPF_AND | BPF_X:
case BPF_ALU | BPF_AND | BPF_K:
case BPF_ALU | BPF_OR | BPF_X:
case BPF_ALU | BPF_OR | BPF_K:
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_X:
case BPF_ALU | BPF_RSH | BPF_K:
case BPF_ALU | BPF_XOR | BPF_X:
case BPF_ALU | BPF_XOR | BPF_K:
case BPF_ALU | BPF_MUL | BPF_X:
case BPF_ALU | BPF_MUL | BPF_K:
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU | BPF_MOD | BPF_K:
case BPF_ALU | BPF_NEG:
case BPF_LD | BPF_ABS | BPF_W:
case BPF_LD | BPF_ABS | BPF_H:
case BPF_LD | BPF_ABS | BPF_B:
case BPF_LD | BPF_IND | BPF_W:
case BPF_LD | BPF_IND | BPF_H:
case BPF_LD | BPF_IND | BPF_B:
if (BPF_CLASS(fp->code) == BPF_LD &&
BPF_MODE(fp->code) == BPF_ABS &&
convert_bpf_extensions(fp, &insn))
break;
if (BPF_CLASS(fp->code) == BPF_LD &&
convert_bpf_ld_abs(fp, &insn)) {
*seen_ld_abs = true;
break;
}
if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
*insn++ = BPF_EXIT_INSN();
}
*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
break;
#define BPF_EMIT_JMP \
do { \
const s32 off_min = S16_MIN, off_max = S16_MAX; \
s32 off; \
\
if (target >= len || target < 0) \
goto err; \
off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
/* Adjust pc relative offset for 2nd or 3rd insn. */ \
off -= insn - tmp_insns; \
/* Reject anything not fitting into insn->off. */ \
if (off < off_min || off > off_max) \
goto err; \
insn->off = off; \
} while (0)
case BPF_JMP | BPF_JA:
target = i + fp->k + 1;
insn->code = fp->code;
BPF_EMIT_JMP;
break;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);
insn->dst_reg = BPF_REG_A;
insn->src_reg = BPF_REG_TMP;
bpf_src = BPF_X;
} else {
insn->dst_reg = BPF_REG_A;
insn->imm = fp->k;
bpf_src = BPF_SRC(fp->code);
insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
}
if (fp->jf == 0) {
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
target = i + fp->jt + 1;
BPF_EMIT_JMP;
break;
}
if (fp->jt == 0) {
switch (BPF_OP(fp->code)) {
case BPF_JEQ:
insn->code = BPF_JMP | BPF_JNE | bpf_src;
break;
case BPF_JGT:
insn->code = BPF_JMP | BPF_JLE | bpf_src;
break;
case BPF_JGE:
insn->code = BPF_JMP | BPF_JLT | bpf_src;
break;
default:
goto jmp_rest;
}
target = i + fp->jf + 1;
BPF_EMIT_JMP;
break;
}
jmp_rest:
target = i + fp->jt + 1;
insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
BPF_EMIT_JMP;
insn++;
insn->code = BPF_JMP | BPF_JA;
target = i + fp->jf + 1;
BPF_EMIT_JMP;
break;
case BPF_LDX | BPF_MSH | BPF_B: {
struct sock_filter tmp = {
.code = BPF_LD | BPF_ABS | BPF_B,
.k = fp->k,
};
*seen_ld_abs = true;
*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
convert_bpf_ld_abs(&tmp, &insn);
insn++;
*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break;
}
case BPF_RET | BPF_A:
case BPF_RET | BPF_K:
if (BPF_RVAL(fp->code) == BPF_K)
*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
0, fp->k);
*insn = BPF_EXIT_INSN();
break;
case BPF_ST:
case BPF_STX:
stack_off = fp->k * 4 + 4;
*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
BPF_ST ? BPF_REG_A : BPF_REG_X,
-stack_off);
if (new_prog && new_prog->aux->stack_depth < stack_off)
new_prog->aux->stack_depth = stack_off;
break;
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
stack_off = fp->k * 4 + 4;
*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
BPF_REG_A : BPF_REG_X, BPF_REG_FP,
-stack_off);
break;
case BPF_LD | BPF_IMM:
case BPF_LDX | BPF_IMM:
*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
BPF_REG_A : BPF_REG_X, fp->k);
break;
case BPF_MISC | BPF_TAX:
*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
break;
case BPF_MISC | BPF_TXA:
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
break;
case BPF_LD | BPF_W | BPF_LEN:
case BPF_LDX | BPF_W | BPF_LEN:
*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
offsetof(struct sk_buff, len));
break;
case BPF_LDX | BPF_ABS | BPF_W:
*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
break;
default:
goto err;
}
insn++;
if (new_prog)
memcpy(new_insn, tmp_insns,
sizeof(*insn) * (insn - tmp_insns));
new_insn += insn - tmp_insns;
}
if (!new_prog) {
*new_len = new_insn - first_insn;
if (*seen_ld_abs)
*new_len += 4;
return 0;
}
pass++;
if (new_flen != new_insn - first_insn) {
new_flen = new_insn - first_insn;
if (pass > 2)
goto err;
goto do_pass;
}
kfree(addrs);
BUG_ON(*new_len != new_flen);
return 0;
err:
kfree(addrs);
return -EINVAL;
}
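/* Make sure every BPF_LD|BPF_MEM / BPF_LDX|BPF_MEM in the classic
 * program only reads scratch memory words that were written by an
 * earlier BPF_ST/BPF_STX on all paths reaching it; otherwise the
 * converted program could read uninitialized stack slots.
 */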
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
u16 *masks, memvalid = 0;
int pc, ret = 0;
BUILD_BUG_ON(BPF_MEMWORDS > 16);
masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
if (!masks)
return -ENOMEM;
memset(masks, 0xff, flen * sizeof(*masks));
for (pc = 0; pc < flen; pc++) {
memvalid &= masks[pc];
switch (filter[pc].code) {
case BPF_ST:
case BPF_STX:
memvalid |= (1 << filter[pc].k);
break;
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
if (!(memvalid & (1 << filter[pc].k))) {
ret = -EINVAL;
goto error;
}
break;
case BPF_JMP | BPF_JA:
masks[pc + 1 + filter[pc].k] &= memvalid;
memvalid = ~0;
break;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
masks[pc + 1 + filter[pc].jt] &= memvalid;
masks[pc + 1 + filter[pc].jf] &= memvalid;
memvalid = ~0;
break;
}
}
error:
kfree(masks);
return ret;
}
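/* Table of classic BPF opcodes that user space is allowed to use. */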
static bool chk_code_allowed(u16 code_to_probe)
{
static const bool codes[] = {
[BPF_ALU | BPF_ADD | BPF_K] = true,
[BPF_ALU | BPF_ADD | BPF_X] = true,
[BPF_ALU | BPF_SUB | BPF_K] = true,
[BPF_ALU | BPF_SUB | BPF_X] = true,
[BPF_ALU | BPF_MUL | BPF_K] = true,
[BPF_ALU | BPF_MUL | BPF_X] = true,
[BPF_ALU | BPF_DIV | BPF_K] = true,
[BPF_ALU | BPF_DIV | BPF_X] = true,
[BPF_ALU | BPF_MOD | BPF_K] = true,
[BPF_ALU | BPF_MOD | BPF_X] = true,
[BPF_ALU | BPF_AND | BPF_K] = true,
[BPF_ALU | BPF_AND | BPF_X] = true,
[BPF_ALU | BPF_OR | BPF_K] = true,
[BPF_ALU | BPF_OR | BPF_X] = true,
[BPF_ALU | BPF_XOR | BPF_K] = true,
[BPF_ALU | BPF_XOR | BPF_X] = true,
[BPF_ALU | BPF_LSH | BPF_K] = true,
[BPF_ALU | BPF_LSH | BPF_X] = true,
[BPF_ALU | BPF_RSH | BPF_K] = true,
[BPF_ALU | BPF_RSH | BPF_X] = true,
[BPF_ALU | BPF_NEG] = true,
[BPF_LD | BPF_W | BPF_ABS] = true,
[BPF_LD | BPF_H | BPF_ABS] = true,
[BPF_LD | BPF_B | BPF_ABS] = true,
[BPF_LD | BPF_W | BPF_LEN] = true,
[BPF_LD | BPF_W | BPF_IND] = true,
[BPF_LD | BPF_H | BPF_IND] = true,
[BPF_LD | BPF_B | BPF_IND] = true,
[BPF_LD | BPF_IMM] = true,
[BPF_LD | BPF_MEM] = true,
[BPF_LDX | BPF_W | BPF_LEN] = true,
[BPF_LDX | BPF_B | BPF_MSH] = true,
[BPF_LDX | BPF_IMM] = true,
[BPF_LDX | BPF_MEM] = true,
[BPF_ST] = true,
[BPF_STX] = true,
[BPF_MISC | BPF_TAX] = true,
[BPF_MISC | BPF_TXA] = true,
[BPF_RET | BPF_K] = true,
[BPF_RET | BPF_A] = true,
[BPF_JMP | BPF_JA] = true,
[BPF_JMP | BPF_JEQ | BPF_K] = true,
[BPF_JMP | BPF_JEQ | BPF_X] = true,
[BPF_JMP | BPF_JGE | BPF_K] = true,
[BPF_JMP | BPF_JGE | BPF_X] = true,
[BPF_JMP | BPF_JGT | BPF_K] = true,
[BPF_JMP | BPF_JGT | BPF_X] = true,
[BPF_JMP | BPF_JSET | BPF_K] = true,
[BPF_JMP | BPF_JSET | BPF_X] = true,
};
if (code_to_probe >= ARRAY_SIZE(codes))
return false;
return codes[code_to_probe];
}
static bool bpf_check_basics_ok(const struct sock_filter *filter,
unsigned int flen)
{
if (filter == NULL)
return false;
if (flen == 0 || flen > BPF_MAXINSNS)
return false;
return true;
}
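/* bpf_check_classic - verify a classic BPF program.
 *
 * Checks that only allowed opcodes are used, constant divisors and
 * moduli are non-zero, shift amounts stay below 32, scratch memory and
 * jump targets are in bounds, absolute loads in the ancillary range map
 * to a known SKF_AD_* extension, and the program ends in a BPF_RET
 * instruction.
 */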
static int bpf_check_classic(const struct sock_filter *filter,
unsigned int flen)
{
bool anc_found;
int pc;
for (pc = 0; pc < flen; pc++) {
const struct sock_filter *ftest = &filter[pc];
if (!chk_code_allowed(ftest->code))
return -EINVAL;
switch (ftest->code) {
case BPF_ALU | BPF_DIV | BPF_K:
case BPF_ALU | BPF_MOD | BPF_K:
if (ftest->k == 0)
return -EINVAL;
break;
case BPF_ALU | BPF_LSH | BPF_K:
case BPF_ALU | BPF_RSH | BPF_K:
if (ftest->k >= 32)
return -EINVAL;
break;
case BPF_LD | BPF_MEM:
case BPF_LDX | BPF_MEM:
case BPF_ST:
case BPF_STX:
if (ftest->k >= BPF_MEMWORDS)
return -EINVAL;
break;
case BPF_JMP | BPF_JA:
if (ftest->k >= (unsigned int)(flen - pc - 1))
return -EINVAL;
break;
case BPF_JMP | BPF_JEQ | BPF_K:
case BPF_JMP | BPF_JEQ | BPF_X:
case BPF_JMP | BPF_JGE | BPF_K:
case BPF_JMP | BPF_JGE | BPF_X:
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP | BPF_JSET | BPF_X:
if (pc + ftest->jt + 1 >= flen ||
pc + ftest->jf + 1 >= flen)
return -EINVAL;
break;
case BPF_LD | BPF_W | BPF_ABS:
case BPF_LD | BPF_H | BPF_ABS:
case BPF_LD | BPF_B | BPF_ABS:
anc_found = false;
if (bpf_anc_helper(ftest) & BPF_ANC)
anc_found = true;
if (anc_found == false && ftest->k >= SKF_AD_OFF)
return -EINVAL;
}
}
switch (filter[flen - 1].code) {
case BPF_RET | BPF_K:
case BPF_RET | BPF_A:
return check_load_and_stores(filter, flen);
}
return -EINVAL;
}
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
const struct sock_fprog *fprog)
{
unsigned int fsize = bpf_classic_proglen(fprog);
struct sock_fprog_kern *fkprog;
fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
if (!fp->orig_prog)
return -ENOMEM;
fkprog = fp->orig_prog;
fkprog->len = fprog->len;
fkprog->filter = kmemdup(fp->insns, fsize,
GFP_KERNEL | __GFP_NOWARN);
if (!fkprog->filter) {
kfree(fp->orig_prog);
return -ENOMEM;
}
return 0;
}
static void bpf_release_orig_filter(struct bpf_prog *fp)
{
struct sock_fprog_kern *fprog = fp->orig_prog;
if (fprog) {
kfree(fprog->filter);
kfree(fprog);
}
}
static void __bpf_prog_release(struct bpf_prog *prog)
{
if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
bpf_prog_put(prog);
} else {
bpf_release_orig_filter(prog);
bpf_prog_free(prog);
}
}
static void __sk_filter_release(struct sk_filter *fp)
{
__bpf_prog_release(fp->prog);
kfree(fp);
}
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
__sk_filter_release(fp);
}
static void sk_filter_release(struct sk_filter *fp)
{
if (refcount_dec_and_test(&fp->refcnt))
call_rcu(&fp->rcu, sk_filter_release_rcu);
}
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
u32 filter_size = bpf_prog_size(fp->prog->len);
atomic_sub(filter_size, &sk->sk_omem_alloc);
sk_filter_release(fp);
}
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
u32 filter_size = bpf_prog_size(fp->prog->len);
int optmem_max = READ_ONCE(sysctl_optmem_max);
if (filter_size <= optmem_max &&
atomic_read(&sk->sk_omem_alloc) + filter_size < optmem_max) {
atomic_add(filter_size, &sk->sk_omem_alloc);
return true;
}
return false;
}
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
if (!refcount_inc_not_zero(&fp->refcnt))
return false;
if (!__sk_filter_charge(sk, fp)) {
sk_filter_release(fp);
return false;
}
return true;
}
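/* The classic BPF JIT did not produce an image, so translate the filter
 * to eBPF instead: bpf_convert_filter() is run once to size the new
 * program and once to emit it into the reallocated bpf_prog, after which
 * bpf_prog_select_runtime() picks the eBPF interpreter or JIT. On error
 * the original program is released and an ERR_PTR is returned.
 */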
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
struct sock_filter *old_prog;
struct bpf_prog *old_fp;
int err, new_len, old_len = fp->len;
bool seen_ld_abs = false;
BUILD_BUG_ON(sizeof(struct sock_filter) !=
sizeof(struct bpf_insn));
old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
GFP_KERNEL | __GFP_NOWARN);
if (!old_prog) {
err = -ENOMEM;
goto out_err;
}
err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
&seen_ld_abs);
if (err)
goto out_err_free;
old_fp = fp;
fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
if (!fp) {
fp = old_fp;
err = -ENOMEM;
goto out_err_free;
}
fp->len = new_len;
err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
&seen_ld_abs);
if (err)
goto out_err_free;
fp = bpf_prog_select_runtime(fp, &err);
if (err)
goto out_err_free;
kfree(old_prog);
return fp;
out_err_free:
kfree(old_prog);
out_err:
__bpf_prog_release(fp);
return ERR_PTR(err);
}
static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
bpf_aux_classic_check_t trans)
{
int err;
fp->bpf_func = NULL;
fp->jited = 0;
err = bpf_check_classic(fp->insns, fp->len);
if (err) {
__bpf_prog_release(fp);
return ERR_PTR(err);
}
if (trans) {
err = trans(fp->insns, fp->len);
if (err) {
__bpf_prog_release(fp);
return ERR_PTR(err);
}
}
bpf_jit_compile(fp);
if (!fp->jited)
fp = bpf_migrate_filter(fp);
return fp;
}
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
unsigned int fsize = bpf_classic_proglen(fprog);
struct bpf_prog *fp;
if (!bpf_check_basics_ok(fprog->filter, fprog->len))
return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
if (!fp)
return -ENOMEM;
memcpy(fp->insns, fprog->filter, fsize);
fp->len = fprog->len;
fp->orig_prog = NULL;
fp = bpf_prepare_filter(fp, NULL);
if (IS_ERR(fp))
return PTR_ERR(fp);
*pfp = fp;
return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig)
{
unsigned int fsize = bpf_classic_proglen(fprog);
struct bpf_prog *fp;
int err;
if (!bpf_check_basics_ok(fprog->filter, fprog->len))
return -EINVAL;
fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
if (!fp)
return -ENOMEM;
if (copy_from_user(fp->insns, fprog->filter, fsize)) {
__bpf_prog_free(fp);
return -EFAULT;
}
fp->len = fprog->len;
fp->orig_prog = NULL;
if (save_orig) {
err = bpf_prog_store_orig_filter(fp, fprog);
if (err) {
__bpf_prog_free(fp);
return -ENOMEM;
}
}
fp = bpf_prepare_filter(fp, trans);
if (IS_ERR(fp))
return PTR_ERR(fp);
*pfp = fp;
return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
void bpf_prog_destroy(struct bpf_prog *fp)
{
__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
struct sk_filter *fp, *old_fp;
fp = kmalloc(sizeof(*fp), GFP_KERNEL);
if (!fp)
return -ENOMEM;
fp->prog = prog;
if (!__sk_filter_charge(sk, fp)) {
kfree(fp);
return -ENOMEM;
}
refcount_set(&fp->refcnt, 1);
old_fp = rcu_dereference_protected(sk->sk_filter,
lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_filter, fp);
if (old_fp)
sk_filter_uncharge(sk, old_fp);
return 0;
}
static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
unsigned int fsize = bpf_classic_proglen(fprog);
struct bpf_prog *prog;
int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return ERR_PTR(-EPERM);
if (!bpf_check_basics_ok(fprog->filter, fprog->len))
return ERR_PTR(-EINVAL);
prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
if (!prog)
return ERR_PTR(-ENOMEM);
if (copy_from_user(prog->insns, fprog->filter, fsize)) {
__bpf_prog_free(prog);
return ERR_PTR(-EFAULT);
}
prog->len = fprog->len;
err = bpf_prog_store_orig_filter(prog, fprog);
if (err) {
__bpf_prog_free(prog);
return ERR_PTR(-ENOMEM);
}
return bpf_prepare_filter(prog, NULL);
}
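/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */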
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
struct bpf_prog *prog = __get_filter(fprog, sk);
int err;
if (IS_ERR(prog))
return PTR_ERR(prog);
err = __sk_attach_prog(prog, sk);
if (err < 0) {
__bpf_prog_release(prog);
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
struct bpf_prog *prog = __get_filter(fprog, sk);
int err;
if (IS_ERR(prog))
return PTR_ERR(prog);
if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max))
err = -ENOMEM;
else
err = reuseport_attach_prog(sk, prog);
if (err)
__bpf_prog_release(prog);
return err;
}
static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return ERR_PTR(-EPERM);
return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}
int sk_attach_bpf(u32 ufd, struct sock *sk)
{
struct bpf_prog *prog = __get_bpf(ufd, sk);
int err;
if (IS_ERR(prog))
return PTR_ERR(prog);
err = __sk_attach_prog(prog, sk);
if (err < 0) {
bpf_prog_put(prog);
return err;
}
return 0;
}
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
struct bpf_prog *prog;
int err;
if (sock_flag(sk, SOCK_FILTER_LOCKED))
return -EPERM;
prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
if (PTR_ERR(prog) == -EINVAL)
prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
if (IS_ERR(prog))
return PTR_ERR(prog);
if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
if ((sk->sk_type != SOCK_STREAM &&
sk->sk_type != SOCK_DGRAM) ||
(sk->sk_protocol != IPPROTO_UDP &&
sk->sk_protocol != IPPROTO_TCP) ||
(sk->sk_family != AF_INET &&
sk->sk_family != AF_INET6)) {
err = -ENOTSUPP;
goto err_prog_put;
}
} else {
if (bpf_prog_size(prog->len) > READ_ONCE(sysctl_optmem_max)) {
err = -ENOMEM;
goto err_prog_put;
}
}
err = reuseport_attach_prog(sk, prog);
err_prog_put:
if (err)
bpf_prog_put(prog);
return err;
}
void sk_reuseport_prog_free(struct bpf_prog *prog)
{
if (!prog)
return;
if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
bpf_prog_put(prog);
else
bpf_prog_destroy(prog);
}
struct bpf_scratchpad {
union {
__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
u8 buff[MAX_BPF_STACK];
};
};
static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
static inline int __bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
return skb_ensure_writable(skb, write_len);
}
static inline int bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
int err = __bpf_try_make_writable(skb, write_len);
bpf_compute_data_pointers(skb);
return err;
}
static int bpf_try_make_head_writable(struct sk_buff *skb)
{
return bpf_try_make_writable(skb, skb_headlen(skb));
}
static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
if (skb_at_tc_ingress(skb))
skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
if (skb_at_tc_ingress(skb))
skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}
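/* bpf_skb_store_bytes(): write @len bytes from @from into the packet at
 * @offset. The skb head is made writable up to @offset + @len first;
 * BPF_F_RECOMPUTE_CSUM keeps skb->csum consistent across the rewrite and
 * BPF_F_INVALIDATE_HASH clears the cached flow hash.
 */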
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
const void *, from, u32, len, u64, flags)
{
void *ptr;
if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
return -EINVAL;
if (unlikely(offset > INT_MAX))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + len)))
return -EFAULT;
ptr = skb->data + offset;
if (flags & BPF_F_RECOMPUTE_CSUM)
__skb_postpull_rcsum(skb, ptr, len, offset);
memcpy(ptr, from, len);
if (flags & BPF_F_RECOMPUTE_CSUM)
__skb_postpush_rcsum(skb, ptr, len, offset);
if (flags & BPF_F_INVALIDATE_HASH)
skb_clear_hash(skb);
return 0;
}
static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
.func = bpf_skb_store_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE,
.arg5_type = ARG_ANYTHING,
};
int __bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from,
u32 len, u64 flags)
{
return ____bpf_skb_store_bytes(skb, offset, from, len, flags);
}
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
void *, to, u32, len)
{
void *ptr;
if (unlikely(offset > INT_MAX))
goto err_clear;
ptr = skb_header_pointer(skb, offset, len, to);
if (unlikely(!ptr))
goto err_clear;
if (ptr != to)
memcpy(to, ptr, len);
return 0;
err_clear:
memset(to, 0, len);
return -EFAULT;
}
static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
.func = bpf_skb_load_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
};
int __bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
{
return ____bpf_skb_load_bytes(skb, offset, to, len);
}
BPF_CALL_4(bpf_flow_dissector_load_bytes,
const struct bpf_flow_dissector *, ctx, u32, offset,
void *, to, u32, len)
{
void *ptr;
if (unlikely(offset > 0xffff))
goto err_clear;
if (unlikely(!ctx->skb))
goto err_clear;
ptr = skb_header_pointer(ctx->skb, offset, len, to);
if (unlikely(!ptr))
goto err_clear;
if (ptr != to)
memcpy(to, ptr, len);
return 0;
err_clear:
memset(to, 0, len);
return -EFAULT;
}
static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
.func = bpf_flow_dissector_load_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
};
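/* Like bpf_skb_load_bytes(), but @offset is taken relative to the MAC or
 * network header as selected by @start_header. This allows reading
 * headers that sit in front of skb->data, e.g. the MAC header from a TC
 * ingress program, as long as the bytes are within the linear data.
 */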
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
u32, offset, void *, to, u32, len, u32, start_header)
{
u8 *end = skb_tail_pointer(skb);
u8 *start, *ptr;
if (unlikely(offset > 0xffff))
goto err_clear;
switch (start_header) {
case BPF_HDR_START_MAC:
if (unlikely(!skb_mac_header_was_set(skb)))
goto err_clear;
start = skb_mac_header(skb);
break;
case BPF_HDR_START_NET:
start = skb_network_header(skb);
break;
default:
goto err_clear;
}
ptr = start + offset;
if (likely(ptr + len <= end)) {
memcpy(to, ptr, len);
return 0;
}
err_clear:
memset(to, 0, len);
return -EFAULT;
}
static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
.func = bpf_skb_load_bytes_relative,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
.arg5_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}
static const struct bpf_func_proto bpf_skb_pull_data_proto = {
.func = bpf_skb_pull_data,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}
static const struct bpf_func_proto bpf_sk_fullsock_proto = {
.func = bpf_sk_fullsock,
.gpl_only = false,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_SOCK_COMMON,
};
static inline int sk_skb_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
return __bpf_try_make_writable(skb, write_len);
}
BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}
static const struct bpf_func_proto sk_skb_pull_data_proto = {
.func = sk_skb_pull_data,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
u64, from, u64, to, u64, flags)
{
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
return -EINVAL;
if (unlikely(offset > 0xffff || offset & 1))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
return -EFAULT;
ptr = (__sum16 *)(skb->data + offset);
switch (flags & BPF_F_HDR_FIELD_MASK) {
case 0:
if (unlikely(from != 0))
return -EINVAL;
csum_replace_by_diff(ptr, to);
break;
case 2:
csum_replace2(ptr, from, to);
break;
case 4:
csum_replace4(ptr, from, to);
break;
default:
return -EINVAL;
}
return 0;
}
static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
.func = bpf_l3_csum_replace,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
u64, from, u64, to, u64, flags)
{
bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
bool do_mforce = flags & BPF_F_MARK_ENFORCE;
__sum16 *ptr;
if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
return -EINVAL;
if (unlikely(offset > 0xffff || offset & 1))
return -EFAULT;
if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
return -EFAULT;
ptr = (__sum16 *)(skb->data + offset);
if (is_mmzero && !do_mforce && !*ptr)
return 0;
switch (flags & BPF_F_HDR_FIELD_MASK) {
case 0:
if (unlikely(from != 0))
return -EINVAL;
inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
break;
case 2:
inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
break;
case 4:
inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
break;
default:
return -EINVAL;
}
if (is_mmzero && !*ptr)
*ptr = CSUM_MANGLED_0;
return 0;
}
static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
.func = bpf_l4_csum_replace,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
};
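/* bpf_csum_diff(): compute the checksum difference between the @from and
 * @to buffers, seeded with @seed, using a per-CPU scratch buffer. The
 * result is typically fed into bpf_l3_csum_replace() or
 * bpf_l4_csum_replace() after rewriting packet contents.
 */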
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
__be32 *, to, u32, to_size, __wsum, seed)
{
struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
u32 diff_size = from_size + to_size;
int i, j = 0;
if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
diff_size > sizeof(sp->diff)))
return -EINVAL;
for (i = 0; i < from_size / sizeof(__be32); i++, j++)
sp->diff[j] = ~from[i];
for (i = 0; i < to_size / sizeof(__be32); i++, j++)
sp->diff[j] = to[i];
return csum_partial(sp->diff, diff_size, seed);
}
static const struct bpf_func_proto bpf_csum_diff_proto = {
.func = bpf_csum_diff,
.gpl_only = false,
.pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE_OR_ZERO,
.arg5_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
if (skb->ip_summed == CHECKSUM_COMPLETE)
return (skb->csum = csum_add(skb->csum, csum));
return -ENOTSUPP;
}
static const struct bpf_func_proto bpf_csum_update_proto = {
.func = bpf_csum_update,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level)
{
switch (level) {
case BPF_CSUM_LEVEL_INC:
__skb_incr_checksum_unnecessary(skb);
break;
case BPF_CSUM_LEVEL_DEC:
__skb_decr_checksum_unnecessary(skb);
break;
case BPF_CSUM_LEVEL_RESET:
__skb_reset_checksum_unnecessary(skb);
break;
case BPF_CSUM_LEVEL_QUERY:
return skb->ip_summed == CHECKSUM_UNNECESSARY ?
skb->csum_level : -EACCES;
default:
return -EINVAL;
}
return 0;
}
static const struct bpf_func_proto bpf_csum_level_proto = {
.func = bpf_csum_level,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
return dev_forward_skb_nomtu(dev, skb);
}
static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
struct sk_buff *skb)
{
int ret = ____dev_forward_skb(dev, skb, false);
if (likely(!ret)) {
skb->dev = dev;
ret = netif_rx(skb);
}
return ret;
}
static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
int ret;
if (dev_xmit_recursion()) {
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
}
skb->dev = dev;
skb_set_redirected_noclear(skb, skb_at_tc_ingress(skb));
skb_clear_tstamp(skb);
dev_xmit_recursion_inc();
ret = dev_queue_xmit(skb);
dev_xmit_recursion_dec();
return ret;
}
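/* Redirect towards a device without a MAC header (e.g. an L3 tunnel
 * device): strip everything up to the network header before handing the
 * skb over, either into the stack via netif_rx() for BPF_F_INGRESS or
 * out through dev_queue_xmit().
 */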
static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
unsigned int mlen = skb_network_offset(skb);
if (unlikely(skb->len <= mlen)) {
kfree_skb(skb);
return -ERANGE;
}
if (mlen) {
__skb_pull(skb, mlen);
if (!skb_at_tc_ingress(skb))
skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
}
skb_pop_mac_header(skb);
skb_reset_mac_len(skb);
return flags & BPF_F_INGRESS ?
__bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}
static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
if (unlikely(skb->mac_header >= skb->network_header || skb->len == 0)) {
kfree_skb(skb);
return -ERANGE;
}
bpf_push_mac_rcsum(skb);
return flags & BPF_F_INGRESS ?
__bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
u32 flags)
{
if (dev_is_mac_header_xmit(dev))
return __bpf_redirect_common(skb, dev, flags);
else
return __bpf_redirect_no_mac(skb, dev, flags);
}
#if IS_ENABLED(CONFIG_IPV6)
static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
struct net_device *dev, struct bpf_nh_params *nh)
{
u32 hh_len = LL_RESERVED_SPACE(dev);
const struct in6_addr *nexthop;
struct dst_entry *dst = NULL;
struct neighbour *neigh;
if (dev_xmit_recursion()) {
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
goto out_drop;
}
skb->dev = dev;
skb_clear_tstamp(skb);
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb = skb_expand_head(skb, hh_len);
if (!skb)
return -ENOMEM;
}
rcu_read_lock();
if (!nh) {
dst = skb_dst(skb);
nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
&ipv6_hdr(skb)->daddr);
} else {
nexthop = &nh->ipv6_nh;
}
neigh = ip_neigh_gw6(dev, nexthop);
if (likely(!IS_ERR(neigh))) {
int ret;
sock_confirm_neigh(skb, neigh);
local_bh_disable();
dev_xmit_recursion_inc();
ret = neigh_output(neigh, skb, false);
dev_xmit_recursion_dec();
local_bh_enable();
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
if (dst)
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
out_drop:
kfree_skb(skb);
return -ENETDOWN;
}
static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct net *net = dev_net(dev);
int err, ret = NET_XMIT_DROP;
if (!nh) {
struct dst_entry *dst;
struct flowi6 fl6 = {
.flowi6_flags = FLOWI_FLAG_ANYSRC,
.flowi6_mark = skb->mark,
.flowlabel = ip6_flowinfo(ip6h),
.flowi6_oif = dev->ifindex,
.flowi6_proto = ip6h->nexthdr,
.daddr = ip6h->daddr,
.saddr = ip6h->saddr,
};
dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
if (IS_ERR(dst))
goto out_drop;
skb_dst_set(skb, dst);
} else if (nh->nh_family != AF_INET6) {
goto out_drop;
}
err = bpf_out_neigh_v6(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
dev->stats.tx_errors++;
kfree_skb(skb);
out_xmit:
return ret;
}
#else
static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{
kfree_skb(skb);
return NET_XMIT_DROP;
}
#endif /* CONFIG_IPV6 */
#if IS_ENABLED(CONFIG_INET)
static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
struct net_device *dev, struct bpf_nh_params *nh)
{
u32 hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
bool is_v6gw = false;
if (dev_xmit_recursion()) {
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
goto out_drop;
}
skb->dev = dev;
skb_clear_tstamp(skb);
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb = skb_expand_head(skb, hh_len);
if (!skb)
return -ENOMEM;
}
rcu_read_lock();
if (!nh) {
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = container_of(dst, struct rtable, dst);
neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
} else if (nh->nh_family == AF_INET6) {
neigh = ip_neigh_gw6(dev, &nh->ipv6_nh);
is_v6gw = true;
} else if (nh->nh_family == AF_INET) {
neigh = ip_neigh_gw4(dev, nh->ipv4_nh);
} else {
rcu_read_unlock();
goto out_drop;
}
if (likely(!IS_ERR(neigh))) {
int ret;
sock_confirm_neigh(skb, neigh);
local_bh_disable();
dev_xmit_recursion_inc();
ret = neigh_output(neigh, skb, is_v6gw);
dev_xmit_recursion_dec();
local_bh_enable();
rcu_read_unlock();
return ret;
}
rcu_read_unlock();
out_drop:
kfree_skb(skb);
return -ENETDOWN;
}
static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net *net = dev_net(dev);
int err, ret = NET_XMIT_DROP;
if (!nh) {
struct flowi4 fl4 = {
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_oif = dev->ifindex,
.flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
.saddr = ip4h->saddr,
};
struct rtable *rt;
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto out_drop;
if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
ip_rt_put(rt);
goto out_drop;
}
skb_dst_set(skb, &rt->dst);
}
err = bpf_out_neigh_v4(net, skb, dev, nh);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
else
ret = NET_XMIT_SUCCESS;
goto out_xmit;
out_drop:
dev->stats.tx_errors++;
kfree_skb(skb);
out_xmit:
return ret;
}
#else
static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{
kfree_skb(skb);
return NET_XMIT_DROP;
}
#endif /* CONFIG_INET */
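/* Neighbour-assisted redirect (bpf_redirect_neigh()): drop the original
 * Ethernet header and let the IPv4/IPv6 neighbour subsystem resolve and
 * fill in the L2 header for the next hop on the target device. Multicast
 * destinations and non-IP packets are dropped.
 */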
static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
struct bpf_nh_params *nh)
{
struct ethhdr *ethh = eth_hdr(skb);
if (unlikely(skb->mac_header >= skb->network_header))
goto out;
bpf_push_mac_rcsum(skb);
if (is_multicast_ether_addr(ethh->h_dest))
goto out;
skb_pull(skb, sizeof(*ethh));
skb_unset_mac_header(skb);
skb_reset_network_header(skb);
if (skb->protocol == htons(ETH_P_IP))
return __bpf_redirect_neigh_v4(skb, dev, nh);
else if (skb->protocol == htons(ETH_P_IPV6))
return __bpf_redirect_neigh_v6(skb, dev, nh);
out:
kfree_skb(skb);
return -ENOTSUPP;
}
enum {
BPF_F_NEIGH = (1ULL << 1),
BPF_F_PEER = (1ULL << 2),
BPF_F_NEXTHOP = (1ULL << 3),
#define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
};
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
struct net_device *dev;
struct sk_buff *clone;
int ret;
if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return -EINVAL;
dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
if (unlikely(!dev))
return -EINVAL;
clone = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!clone))
return -ENOMEM;
ret = bpf_try_make_head_writable(skb);
if (unlikely(ret)) {
kfree_skb(clone);
return -ENOMEM;
}
return __bpf_redirect(clone, dev, flags);
}
static const struct bpf_func_proto bpf_clone_redirect_proto = {
.func = bpf_clone_redirect,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
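/* Called when a TC program returned TC_ACT_REDIRECT: consume the per-CPU
 * bpf_redirect_info filled in by bpf_redirect*() and deliver the skb to
 * the target device. BPF_F_PEER switches to the peer netdevice (e.g. the
 * other end of a veth pair) and returns -EAGAIN so the caller re-runs the
 * receive path in the peer's namespace.
 */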
int skb_do_redirect(struct sk_buff *skb)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct net *net = dev_net(skb->dev);
struct net_device *dev;
u32 flags = ri->flags;
dev = dev_get_by_index_rcu(net, ri->tgt_index);
ri->tgt_index = 0;
ri->flags = 0;
if (unlikely(!dev))
goto out_drop;
if (flags & BPF_F_PEER) {
const struct net_device_ops *ops = dev->netdev_ops;
if (unlikely(!ops->ndo_get_peer_dev ||
!skb_at_tc_ingress(skb)))
goto out_drop;
dev = ops->ndo_get_peer_dev(dev);
if (unlikely(!dev ||
!(dev->flags & IFF_UP) ||
net_eq(net, dev_net(dev))))
goto out_drop;
skb->dev = dev;
return -EAGAIN;
}
return flags & BPF_F_NEIGH ?
__bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ?
&ri->nh : NULL) :
__bpf_redirect(skb, dev, flags);
out_drop:
kfree_skb(skb);
return -EINVAL;
}
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return TC_ACT_SHOT;
ri->flags = flags;
ri->tgt_index = ifindex;
return TC_ACT_REDIRECT;
}
static const struct bpf_func_proto bpf_redirect_proto = {
.func = bpf_redirect,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
if (unlikely(flags))
return TC_ACT_SHOT;
ri->flags = BPF_F_PEER;
ri->tgt_index = ifindex;
return TC_ACT_REDIRECT;
}
static const struct bpf_func_proto bpf_redirect_peer_proto = {
.func = bpf_redirect_peer,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
int, plen, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
if (unlikely((plen && plen < sizeof(*params)) || flags))
return TC_ACT_SHOT;
ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0);
ri->tgt_index = ifindex;
BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params));
if (plen)
memcpy(&ri->nh, params, sizeof(ri->nh));
return TC_ACT_REDIRECT;
}
static const struct bpf_func_proto bpf_redirect_neigh_proto = {
.func = bpf_redirect_neigh,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
.arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
msg->apply_bytes = bytes;
return 0;
}
static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
.func = bpf_msg_apply_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{
msg->cork_bytes = bytes;
return 0;
}
static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
.func = bpf_msg_cork_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
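/* bpf_msg_pull_data(): make the [start, end) byte range of a sk_msg
 * accessible as one linear buffer via msg->data / msg->data_end. If the
 * range spans several scatterlist entries, or the entry is marked as
 * shared (copy bit set), the affected entries are copied into a newly
 * allocated page and the ring is compacted.
 */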
BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
u32, end, u64, flags)
{
u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
u32 first_sge, last_sge, i, shift, bytes_sg_total;
struct scatterlist *sge;
u8 *raw, *to, *from;
struct page *page;
if (unlikely(flags || end <= start))
return -EINVAL;
i = msg->sg.start;
do {
offset += len;
len = sk_msg_elem(msg, i)->length;
if (start < offset + len)
break;
sk_msg_iter_var_next(i);
} while (i != msg->sg.end);
if (unlikely(start >= offset + len))
return -EINVAL;
first_sge = i;
bytes_sg_total = start - offset + bytes;
if (!test_bit(i, msg->sg.copy) && bytes_sg_total <= len)
goto out;
do {
copy += sk_msg_elem(msg, i)->length;
sk_msg_iter_var_next(i);
if (bytes_sg_total <= copy)
break;
} while (i != msg->sg.end);
last_sge = i;
if (unlikely(bytes_sg_total > copy))
return -EINVAL;
page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
get_order(copy));
if (unlikely(!page))
return -ENOMEM;
raw = page_address(page);
i = first_sge;
do {
sge = sk_msg_elem(msg, i);
from = sg_virt(sge);
len = sge->length;
to = raw + poffset;
memcpy(to, from, len);
poffset += len;
sge->length = 0;
put_page(sg_page(sge));
sk_msg_iter_var_next(i);
} while (i != last_sge);
sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
WARN_ON_ONCE(last_sge == first_sge);
shift = last_sge > first_sge ?
last_sge - first_sge - 1 :
NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
if (!shift)
goto out;
i = first_sge;
sk_msg_iter_var_next(i);
do {
u32 move_from;
if (i + shift >= NR_MSG_FRAG_IDS)
move_from = i + shift - NR_MSG_FRAG_IDS;
else
move_from = i + shift;
if (move_from == msg->sg.end)
break;
msg->sg.data[i] = msg->sg.data[move_from];
msg->sg.data[move_from].length = 0;
msg->sg.data[move_from].page_link = 0;
msg->sg.data[move_from].offset = 0;
sk_msg_iter_var_next(i);
} while (1);
msg->sg.end = msg->sg.end - shift > msg->sg.end ?
msg->sg.end - shift + NR_MSG_FRAG_IDS :
msg->sg.end - shift;
out:
msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
msg->data_end = msg->data + bytes;
return 0;
}
static const struct bpf_func_proto bpf_msg_pull_data_proto = {
.func = bpf_msg_pull_data,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
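/* bpf_msg_push_data(): grow the sk_msg by inserting @len uninitialized
 * bytes at @start. Depending on the free ring slots the affected entry
 * is either split around the new region or copied into a fresh page that
 * holds the old data with the gap in between, then the socket is charged
 * for the additional memory.
 */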
BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
u32, len, u64, flags)
{
struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
u8 *raw, *to, *from;
struct page *page;
if (unlikely(flags))
return -EINVAL;
if (unlikely(len == 0))
return 0;
i = msg->sg.start;
do {
offset += l;
l = sk_msg_elem(msg, i)->length;
if (start < offset + l)
break;
sk_msg_iter_var_next(i);
} while (i != msg->sg.end);
if (start >= offset + l)
return -EINVAL;
space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
if (!space || (space == 1 && start != offset))
copy = msg->sg.data[i].length;
page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
get_order(copy + len));
if (unlikely(!page))
return -ENOMEM;
if (copy) {
int front, back;
raw = page_address(page);
psge = sk_msg_elem(msg, i);
front = start - offset;
back = psge->length - front;
from = sg_virt(psge);
if (front)
memcpy(raw, from, front);
if (back) {
from += front;
to = raw + front + len;
memcpy(to, from, back);
}
put_page(sg_page(psge));
} else if (start - offset) {
psge = sk_msg_elem(msg, i);
rsge = sk_msg_elem_cpy(msg, i);
psge->length = start - offset;
rsge.length -= psge->length;
rsge.offset += start;
sk_msg_iter_var_next(i);
sg_unmark_end(psge);
sg_unmark_end(&rsge);
sk_msg_iter_next(msg, end);
}
new = i;
if (!copy) {
sge = sk_msg_elem_cpy(msg, i);
sk_msg_iter_var_next(i);
sg_unmark_end(&sge);
sk_msg_iter_next(msg, end);
nsge = sk_msg_elem_cpy(msg, i);
if (rsge.length) {
sk_msg_iter_var_next(i);
nnsge = sk_msg_elem_cpy(msg, i);
}
while (i != msg->sg.end) {
msg->sg.data[i] = sge;
sge = nsge;
sk_msg_iter_var_next(i);
if (rsge.length) {
nsge = nnsge;
nnsge = sk_msg_elem_cpy(msg, i);
} else {
nsge = sk_msg_elem_cpy(msg, i);
}
}
}
sk_mem_charge(msg->sk, len);
msg->sg.size += len;
__clear_bit(new, msg->sg.copy);
sg_set_page(&msg->sg.data[new], page, len + copy, 0);
if (rsge.length) {
get_page(sg_page(&rsge));
sk_msg_iter_var_next(new);
msg->sg.data[new] = rsge;
}
sk_msg_compute_data_pointers(msg);
return 0;
}
static const struct bpf_func_proto bpf_msg_push_data_proto = {
.func = bpf_msg_push_data,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
static void sk_msg_shift_left(struct sk_msg *msg, int i)
{
int prev;
do {
prev = i;
sk_msg_iter_var_next(i);
msg->sg.data[prev] = msg->sg.data[i];
} while (i != msg->sg.end);
sk_msg_iter_prev(msg, end);
}
static void sk_msg_shift_right(struct sk_msg *msg, int i)
{
struct scatterlist tmp, sge;
sk_msg_iter_next(msg, end);
sge = sk_msg_elem_cpy(msg, i);
sk_msg_iter_var_next(i);
tmp = sk_msg_elem_cpy(msg, i);
while (i != msg->sg.end) {
msg->sg.data[i] = sge;
sk_msg_iter_var_next(i);
sge = tmp;
tmp = sk_msg_elem_cpy(msg, i);
}
}
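/* bpf_msg_pop_data(): remove @len bytes starting at @start from the
 * sk_msg, trimming or splitting the scatterlist entries that cover the
 * range and uncharging the freed bytes from the socket.
 */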
BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
u32, len, u64, flags)
{
u32 i = 0, l = 0, space, offset = 0;
u64 last = start + len;
int pop;
if (unlikely(flags))
return -EINVAL;
i = msg->sg.start;
do {
offset += l;
l = sk_msg_elem(msg, i)->length;
if (start < offset + l)
break;
sk_msg_iter_var_next(i);
} while (i != msg->sg.end);
if (start >= offset + l || last > msg->sg.size)
return -EINVAL;
space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
pop = len;
if (start != offset) {
struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
int a = start - offset;
int b = sge->length - pop - a;
sk_msg_iter_var_next(i);
if (pop < sge->length - a) {
if (space) {
sge->length = a;
sk_msg_shift_right(msg, i);
nsge = sk_msg_elem(msg, i);
get_page(sg_page(sge));
sg_set_page(nsge,
sg_page(sge),
b, sge->offset + pop + a);
} else {
struct page *page, *orig;
u8 *to, *from;
page = alloc_pages(__GFP_NOWARN |
__GFP_COMP | GFP_ATOMIC,
get_order(a + b));
if (unlikely(!page))
return -ENOMEM;
sge->length = a;
orig = sg_page(sge);
from = sg_virt(sge);
to = page_address(page);
memcpy(to, from, a);
memcpy(to + a, from + a + pop, b);
sg_set_page(sge, page, a + b, 0);
put_page(orig);
}
pop = 0;
} else if (pop >= sge->length - a) {
pop -= (sge->length - a);
sge->length = a;
}
}
while (pop) {
struct scatterlist *sge = sk_msg_elem(msg, i);
if (pop < sge->length) {
sge->length -= pop;
sge->offset += pop;
pop = 0;
} else {
pop -= sge->length;
sk_msg_shift_left(msg, i);
}
sk_msg_iter_var_next(i);
}
sk_mem_uncharge(msg->sk, len - pop);
msg->sg.size -= (len - pop);
sk_msg_compute_data_pointers(msg);
return 0;
}
static const struct bpf_func_proto bpf_msg_pop_data_proto = {
.func = bpf_msg_pop_data,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
#ifdef CONFIG_CGROUP_NET_CLASSID
BPF_CALL_0(bpf_get_cgroup_classid_curr)
{
return __task_get_classid(current);
}
const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = {
.func = bpf_get_cgroup_classid_curr,
.gpl_only = false,
.ret_type = RET_INTEGER,
};
BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb)
{
struct sock *sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk))
return 0;
return sock_cgroup_classid(&sk->sk_cgrp_data);
}
static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = {
.func = bpf_skb_cgroup_classid,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
#endif
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
return task_get_classid(skb);
}
static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
.func = bpf_get_cgroup_classid,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
return dst_tclassid(skb);
}
static const struct bpf_func_proto bpf_get_route_realm_proto = {
.func = bpf_get_route_realm,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
return skb_get_hash(skb);
}
static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
.func = bpf_get_hash_recalc,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
skb_clear_hash(skb);
return 0;
}
static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
.func = bpf_set_hash_invalid,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
__skb_set_sw_hash(skb, hash, true);
return 0;
}
static const struct bpf_func_proto bpf_set_hash_proto = {
.func = bpf_set_hash,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
u16, vlan_tci)
{
int ret;
if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
vlan_proto != htons(ETH_P_8021AD)))
vlan_proto = htons(ETH_P_8021Q);
bpf_push_mac_rcsum(skb);
ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
bpf_pull_mac_rcsum(skb);
bpf_compute_data_pointers(skb);
return ret;
}
static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
.func = bpf_skb_vlan_push,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
int ret;
bpf_push_mac_rcsum(skb);
ret = skb_vlan_pop(skb);
bpf_pull_mac_rcsum(skb);
bpf_compute_data_pointers(skb);
return ret;
}
static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
.func = bpf_skb_vlan_pop,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
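/* bpf_skb_generic_push()/bpf_skb_generic_pop(): open or close @len bytes of
 * header room at offset @off into the packet. The first @off bytes
 * (typically the MAC header) stay at the front; pushed room is zero-filled
 * and popping fixes up the software checksum for the removed bytes.
 */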
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
skb_push(skb, len);
memmove(skb->data, skb->data + len, off);
memset(skb->data + off, 0, len);
return 0;
}
static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
void *old_data;
if (unlikely(!pskb_may_pull(skb, off + len)))
return -ENOMEM;
old_data = skb->data;
__skb_pull(skb, len);
skb_postpull_rcsum(skb, old_data + off, len);
memmove(skb->data, old_data, off);
return 0;
}
static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
bool trans_same = skb->transport_header == skb->network_header;
int ret;
ret = bpf_skb_generic_push(skb, off, len);
if (likely(!ret)) {
skb->mac_header -= len;
skb->network_header -= len;
if (trans_same)
skb->transport_header = skb->network_header;
}
return ret;
}
static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
bool trans_same = skb->transport_header == skb->network_header;
int ret;
ret = bpf_skb_generic_pop(skb, off, len);
if (likely(!ret)) {
skb->mac_header += len;
skb->network_header += len;
if (trans_same)
skb->transport_header = skb->network_header;
}
return ret;
}
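/* IPv4 <-> IPv6 translation helpers for bpf_skb_change_proto(): resize the
 * room behind the MAC header by the 20-byte difference between the two
 * header sizes, switch the TCP GSO type and skb->protocol accordingly, and
 * drop the stale flow hash. Writing the actual new network header is left
 * to the BPF program (e.g. via bpf_skb_store_bytes()).
 */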
static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
u32 off = skb_mac_header_len(skb);
int ret;
ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0))
return ret;
ret = bpf_skb_net_hdr_push(skb, off, len_diff);
if (unlikely(ret < 0))
return ret;
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (shinfo->gso_type & SKB_GSO_TCPV4) {
shinfo->gso_type &= ~SKB_GSO_TCPV4;
shinfo->gso_type |= SKB_GSO_TCPV6;
}
}
skb->protocol = htons(ETH_P_IPV6);
skb_clear_hash(skb);
return 0;
}
static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
u32 off = skb_mac_header_len(skb);
int ret;
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
return ret;
ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
if (unlikely(ret < 0))
return ret;
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (shinfo->gso_type & SKB_GSO_TCPV6) {
shinfo->gso_type &= ~SKB_GSO_TCPV6;
shinfo->gso_type |= SKB_GSO_TCPV4;
}
}
skb->protocol = htons(ETH_P_IP);
skb_clear_hash(skb);
return 0;
}
static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
__be16 from_proto = skb->protocol;
if (from_proto == htons(ETH_P_IP) &&
to_proto == htons(ETH_P_IPV6))
return bpf_skb_proto_4_to_6(skb);
if (from_proto == htons(ETH_P_IPV6) &&
to_proto == htons(ETH_P_IP))
return bpf_skb_proto_6_to_4(skb);
return -ENOTSUPP;
}
BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
u64, flags)
{
int ret;
if (unlikely(flags))
return -EINVAL;
ret = bpf_skb_proto_xlat(skb, proto);
bpf_compute_data_pointers(skb);
return ret;
}
static const struct bpf_func_proto bpf_skb_change_proto_proto = {
.func = bpf_skb_change_proto,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
!skb_pkt_type_ok(pkt_type)))
return -EINVAL;
skb->pkt_type = pkt_type;
return 0;
}
static const struct bpf_func_proto bpf_skb_change_type_proto = {
.func = bpf_skb_change_type,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
switch (skb->protocol) {
case htons(ETH_P_IP):
return sizeof(struct iphdr);
case htons(ETH_P_IPV6):
return sizeof(struct ipv6hdr);
default:
return ~0U;
}
}
#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
#define BPF_F_ADJ_ROOM_DECAP_L3_MASK (BPF_F_ADJ_ROOM_DECAP_L3_IPV4 | \
BPF_F_ADJ_ROOM_DECAP_L3_IPV6)
#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \
BPF_F_ADJ_ROOM_ENCAP_L2( \
BPF_ADJ_ROOM_ENCAP_L2_MASK) | \
BPF_F_ADJ_ROOM_DECAP_L3_MASK)
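/* bpf_skb_net_grow(): add @len_diff bytes of header room at offset @off.
 * When one of the BPF_F_ADJ_ROOM_ENCAP_* flags is set, the skb is also
 * marked as encapsulated: inner header offsets and the inner protocol are
 * recorded, the matching tunnel GSO type is set, and the outer protocol may
 * flip between IPv4 and IPv6.
 */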
static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
u64 flags)
{
u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
u16 mac_len = 0, inner_net = 0, inner_trans = 0;
unsigned int gso_type = SKB_GSO_DODGY;
int ret;
if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
return -ENOTSUPP;
}
ret = skb_cow_head(skb, len_diff);
if (unlikely(ret < 0))
return ret;
if (encap) {
if (skb->protocol != htons(ETH_P_IP) &&
skb->protocol != htons(ETH_P_IPV6))
return -ENOTSUPP;
if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
return -EINVAL;
if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
return -EINVAL;
if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH &&
inner_mac_len < ETH_HLEN)
return -EINVAL;
if (skb->encapsulation)
return -EALREADY;
mac_len = skb->network_header - skb->mac_header;
inner_net = skb->network_header;
if (inner_mac_len > len_diff)
return -EINVAL;
inner_trans = skb->transport_header;
}
ret = bpf_skb_net_hdr_push(skb, off, len_diff);
if (unlikely(ret < 0))
return ret;
if (encap) {
skb->inner_mac_header = inner_net - inner_mac_len;
skb->inner_network_header = inner_net;
skb->inner_transport_header = inner_trans;
if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH)
skb_set_inner_protocol(skb, htons(ETH_P_TEB));
else
skb_set_inner_protocol(skb, skb->protocol);
skb->encapsulation = 1;
skb_set_network_header(skb, mac_len);
if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
gso_type |= SKB_GSO_UDP_TUNNEL;
else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
gso_type |= SKB_GSO_GRE;
else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
gso_type |= SKB_GSO_IPXIP6;
else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
gso_type |= SKB_GSO_IPXIP4;
if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
sizeof(struct ipv6hdr) :
sizeof(struct iphdr);
skb_set_transport_header(skb, mac_len + nh_len);
}
if (skb->protocol == htons(ETH_P_IP) &&
flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
skb->protocol = htons(ETH_P_IPV6);
else if (skb->protocol == htons(ETH_P_IPV6) &&
flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
skb->protocol = htons(ETH_P_IP);
}
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
skb_decrease_gso_size(shinfo, len_diff);
shinfo->gso_type |= gso_type;
shinfo->gso_segs = 0;
}
return 0;
}
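/* bpf_skb_net_shrink(): remove @len_diff bytes of header room at offset
 * @off; with a BPF_F_ADJ_ROOM_DECAP_L3_* flag the outer protocol is switched
 * to match the decapsulated inner header.
 */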
static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
u64 flags)
{
int ret;
if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO |
BPF_F_ADJ_ROOM_DECAP_L3_MASK |
BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
return -EINVAL;
if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
return -ENOTSUPP;
}
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
return ret;
ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
if (unlikely(ret < 0))
return ret;
if (skb->protocol == htons(ETH_P_IP) &&
flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV6)
skb->protocol = htons(ETH_P_IPV6);
else if (skb->protocol == htons(ETH_P_IPV6) &&
flags & BPF_F_ADJ_ROOM_DECAP_L3_IPV4)
skb->protocol = htons(ETH_P_IP);
if (skb_is_gso(skb)) {
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
skb_increase_gso_size(shinfo, len_diff);
shinfo->gso_type |= SKB_GSO_DODGY;
shinfo->gso_segs = 0;
}
return 0;
}
#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
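/* sk_skb_adjust_room(): SK_SKB flavour of room adjustment. It only pushes
 * zero-filled bytes at (or pulls bytes from) the head of the skb and, for
 * sockets with a TLS RX context, keeps the strparser record length in sync.
 */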
BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{
u32 len_diff_abs = abs(len_diff);
bool shrink = len_diff < 0;
int ret = 0;
if (unlikely(flags || mode))
return -EINVAL;
if (unlikely(len_diff_abs > 0xfffU))
return -EFAULT;
if (!shrink) {
ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0))
return ret;
__skb_push(skb, len_diff_abs);
memset(skb->data, 0, len_diff_abs);
} else {
if (unlikely(!pskb_may_pull(skb, len_diff_abs)))
return -ENOMEM;
__skb_pull(skb, len_diff_abs);
}
if (tls_sw_has_ctx_rx(skb->sk)) {
struct strp_msg *rxm = strp_msg(skb);
rxm->full_len += len_diff;
}
return ret;
}
static const struct bpf_func_proto sk_skb_adjust_room_proto = {
.func = sk_skb_adjust_room,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
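/* bpf_skb_adjust_room(): grow or shrink the packet by @len_diff bytes at the
 * MAC or network layer, subject to the flag mask above. An illustrative
 * (not normative) TC program call to make room for an outer IPv4 header in
 * front of the existing one would be:
 *
 *	bpf_skb_adjust_room(skb, sizeof(struct iphdr), BPF_ADJ_ROOM_MAC,
 *			    BPF_F_ADJ_ROOM_ENCAP_L3_IPV4);
 *
 * with the program then writing the outer header itself.
 */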
BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{
u32 len_cur, len_diff_abs = abs(len_diff);
u32 len_min = bpf_skb_net_base_len(skb);
u32 len_max = BPF_SKB_MAX_LEN;
__be16 proto = skb->protocol;
bool shrink = len_diff < 0;
u32 off;
int ret;
if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK |
BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
return -EINVAL;
if (unlikely(len_diff_abs > 0xfffU))
return -EFAULT;
if (unlikely(proto != htons(ETH_P_IP) &&
proto != htons(ETH_P_IPV6)))
return -ENOTSUPP;
off = skb_mac_header_len(skb);
switch (mode) {
case BPF_ADJ_ROOM_NET:
off += bpf_skb_net_base_len(skb);
break;
case BPF_ADJ_ROOM_MAC:
break;
default:
return -ENOTSUPP;
}
if (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
if (!shrink)
return -EINVAL;
switch (flags & BPF_F_ADJ_ROOM_DECAP_L3_MASK) {
case BPF_F_ADJ_ROOM_DECAP_L3_IPV4:
len_min = sizeof(struct iphdr);
break;
case BPF_F_ADJ_ROOM_DECAP_L3_IPV6:
len_min = sizeof(struct ipv6hdr);
break;
default:
return -EINVAL;
}
}
len_cur = skb->len - skb_network_offset(skb);
if ((shrink && (len_diff_abs >= len_cur ||
len_cur - len_diff_abs < len_min)) ||
(!shrink && (skb->len + len_diff_abs > len_max &&
!skb_is_gso(skb))))
return -ENOTSUPP;
ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
bpf_skb_net_grow(skb, off, len_diff_abs, flags);
if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))
__skb_reset_checksum_unnecessary(skb);
bpf_compute_data_pointers(skb);
return ret;
}
static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
.func = bpf_skb_adjust_room,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
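/* Lower bound for bpf_skb_change_tail(): never trim into the network or
 * transport header, nor into the bytes still needed to complete a
 * CHECKSUM_PARTIAL (checksum start + offset + the checksum field itself).
 */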
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
u32 min_len = skb_network_offset(skb);
if (skb_transport_header_was_set(skb))
min_len = skb_transport_offset(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
min_len = skb_checksum_start_offset(skb) +
skb->csum_offset + sizeof(__sum16);
return min_len;
}
static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
unsigned int old_len = skb->len;
int ret;
ret = __skb_grow_rcsum(skb, new_len);
if (!ret)
memset(skb->data + old_len, 0, new_len - old_len);
return ret;
}
static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
return __skb_trim_rcsum(skb, new_len);
}
static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
u64 flags)
{
u32 max_len = BPF_SKB_MAX_LEN;
u32 min_len = __bpf_skb_min_len(skb);
int ret;
if (unlikely(flags || new_len > max_len || new_len < min_len))
return -EINVAL;
if (skb->encapsulation)
return -ENOTSUPP;
ret = __bpf_try_make_writable(skb, skb->len);
if (!ret) {
if (new_len > skb->len)
ret = bpf_skb_grow_rcsum(skb, new_len);
else if (new_len < skb->len)
ret = bpf_skb_trim_rcsum(skb, new_len);
if (!ret && skb_is_gso(skb))
skb_gso_reset(skb);
}
return ret;
}
BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
u64, flags)
{
int ret = __bpf_skb_change_tail(skb, new_len, flags);
bpf_compute_data_pointers(skb);
return ret;
}
static const struct bpf_func_proto bpf_skb_change_tail_proto = {
.func = bpf_skb_change_tail,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
u64, flags)
{
return __bpf_skb_change_tail(skb, new_len, flags);
}
static const struct bpf_func_proto sk_skb_change_tail_proto = {
.func = sk_skb_change_tail,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
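/* __bpf_skb_change_head(): reserve @head_room zero-filled bytes in front of
 * the current data and reset the MAC header, e.g. so a program can prepend
 * an Ethernet header before redirecting to an L2 device.
 */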
static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
u64 flags)
{
u32 max_len = BPF_SKB_MAX_LEN;
u32 new_len = skb->len + head_room;
int ret;
if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
new_len < skb->len))
return -EINVAL;
ret = skb_cow(skb, head_room);
if (likely(!ret)) {
__skb_push(skb, head_room);
memset(skb->data, 0, head_room);
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
}
return ret;
}
BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
u64, flags)
{
int ret = __bpf_skb_change_head(skb, head_room, flags);
bpf_compute_data_pointers(skb);
return ret;
}
static const struct bpf_func_proto bpf_skb_change_head_proto = {
.func = bpf_skb_change_head,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
u64, flags)
{
return __bpf_skb_change_head(skb, head_room, flags);
}
static const struct bpf_func_proto sk_skb_change_head_proto = {
.func = sk_skb_change_head,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_xdp_get_buff_len, struct xdp_buff*, xdp)
{
return xdp_get_buff_len(xdp);
}
static const struct bpf_func_proto bpf_xdp_get_buff_len_proto = {
.func = bpf_xdp_get_buff_len,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BTF_ID_LIST_SINGLE(bpf_xdp_get_buff_len_bpf_ids, struct, xdp_buff)
const struct bpf_func_proto bpf_xdp_get_buff_len_trace_proto = {
.func = bpf_xdp_get_buff_len,
.gpl_only = false,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_xdp_get_buff_len_bpf_ids[0],
};
static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
return xdp_data_meta_unsupported(xdp) ? 0 :
xdp->data - xdp->data_meta;
}
BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
unsigned long metalen = xdp_get_metalen(xdp);
void *data_start = xdp_frame_end + metalen;
void *data = xdp->data + offset;
if (unlikely(data < data_start ||
data > xdp->data_end - ETH_HLEN))
return -EINVAL;
if (metalen)
memmove(xdp->data_meta + offset,
xdp->data_meta, metalen);
xdp->data_meta += offset;
xdp->data = data;
return 0;
}
static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
.func = bpf_xdp_adjust_head,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
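/* bpf_xdp_copy_buf(): copy @len bytes at offset @off between @buf and a
 * possibly multi-fragment xdp_buff. @flush selects the direction: false
 * reads from the buff into @buf, true writes @buf back into the buff.
 */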
void bpf_xdp_copy_buf(struct xdp_buff *xdp, unsigned long off,
void *buf, unsigned long len, bool flush)
{
unsigned long ptr_len, ptr_off = 0;
skb_frag_t *next_frag, *end_frag;
struct skb_shared_info *sinfo;
void *src, *dst;
u8 *ptr_buf;
if (likely(xdp->data_end - xdp->data >= off + len)) {
src = flush ? buf : xdp->data + off;
dst = flush ? xdp->data + off : buf;
memcpy(dst, src, len);
return;
}
sinfo = xdp_get_shared_info_from_buff(xdp);
end_frag = &sinfo->frags[sinfo->nr_frags];
next_frag = &sinfo->frags[0];
ptr_len = xdp->data_end - xdp->data;
ptr_buf = xdp->data;
while (true) {
if (off < ptr_off + ptr_len) {
unsigned long copy_off = off - ptr_off;
unsigned long copy_len = min(len, ptr_len - copy_off);
src = flush ? buf : ptr_buf + copy_off;
dst = flush ? ptr_buf + copy_off : buf;
memcpy(dst, src, copy_len);
off += copy_len;
len -= copy_len;
buf += copy_len;
}
if (!len || next_frag == end_frag)
break;
ptr_off += ptr_len;
ptr_buf = skb_frag_address(next_frag);
ptr_len = skb_frag_size(next_frag);
next_frag++;
}
}
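/* bpf_xdp_pointer(): return a direct pointer when the requested range lies
 * entirely within the linear area or a single fragment, NULL when it spans
 * fragment boundaries (callers then fall back to bpf_xdp_copy_buf()), or an
 * ERR_PTR() for out-of-bounds or oversized requests.
 */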
void *bpf_xdp_pointer(struct xdp_buff *xdp, u32 offset, u32 len)
{
u32 size = xdp->data_end - xdp->data;
struct skb_shared_info *sinfo;
void *addr = xdp->data;
int i;
if (unlikely(offset > 0xffff || len > 0xffff))
return ERR_PTR(-EFAULT);
if (unlikely(offset + len > xdp_get_buff_len(xdp)))
return ERR_PTR(-EINVAL);
if (likely(offset < size))
goto out;
sinfo = xdp_get_shared_info_from_buff(xdp);
offset -= size;
for (i = 0; i < sinfo->nr_frags; i++) {
u32 frag_size = skb_frag_size(&sinfo->frags[i]);
if (offset < frag_size) {
addr = skb_frag_address(&sinfo->frags[i]);
size = frag_size;
break;
}
offset -= frag_size;
}
out:
return offset + len <= size ? addr + offset : NULL;
}
BPF_CALL_4(bpf_xdp_load_bytes, struct xdp_buff *, xdp, u32, offset,
void *, buf, u32, len)
{
void *ptr;
ptr = bpf_xdp_pointer(xdp, offset, len);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
if (!ptr)
bpf_xdp_copy_buf(xdp, offset, buf, len, false);
else
memcpy(buf, ptr, len);
return 0;
}
static const struct bpf_func_proto bpf_xdp_load_bytes_proto = {
.func = bpf_xdp_load_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
};
int __bpf_xdp_load_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len)
{
return ____bpf_xdp_load_bytes(xdp, offset, buf, len);
}
BPF_CALL_4(bpf_xdp_store_bytes, struct xdp_buff *, xdp, u32, offset,
void *, buf, u32, len)
{
void *ptr;
ptr = bpf_xdp_pointer(xdp, offset, len);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
if (!ptr)
bpf_xdp_copy_buf(xdp, offset, buf, len, true);
else
memcpy(ptr, buf, len);
return 0;
}
static const struct bpf_func_proto bpf_xdp_store_bytes_proto = {
.func = bpf_xdp_store_bytes,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
};
int __bpf_xdp_store_bytes(struct xdp_buff *xdp, u32 offset, void *buf, u32 len)
{
return ____bpf_xdp_store_bytes(xdp, offset, buf, len);
}
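/* Tail adjustment for multi-fragment xdp_buffs: growing zero-fills tailroom
 * in the last fragment, shrinking trims fragments from the end and returns
 * fully consumed pages to the memory allocator via __xdp_return().
 */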
static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
struct xdp_rxq_info *rxq = xdp->rxq;
unsigned int tailroom;
if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
return -EOPNOTSUPP;
tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
if (unlikely(offset > tailroom))
return -EINVAL;
memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
skb_frag_size_add(frag, offset);
sinfo->xdp_frags_size += offset;
return 0;
}
static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i, n_frags_free = 0, len_free = 0;
if (unlikely(offset > (int)xdp_get_buff_len(xdp) - ETH_HLEN))
return -EINVAL;
for (i = sinfo->nr_frags - 1; i >= 0 && offset > 0; i--) {
skb_frag_t *frag = &sinfo->frags[i];
int shrink = min_t(int, offset, skb_frag_size(frag));
len_free += shrink;
offset -= shrink;
if (skb_frag_size(frag) == shrink) {
struct page *page = skb_frag_page(frag);
__xdp_return(page_address(page), &xdp->rxq->mem,
false, NULL);
n_frags_free++;
} else {
skb_frag_size_sub(frag, shrink);
break;
}
}
sinfo->nr_frags -= n_frags_free;
sinfo->xdp_frags_size -= len_free;
if (unlikely(!sinfo->nr_frags)) {
xdp_buff_clear_frags_flag(xdp);
xdp->data_end -= offset;
}
return 0;
}
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
void *data_hard_end = xdp_data_hard_end(xdp);
void *data_end = xdp->data_end + offset;
if (unlikely(xdp_buff_has_frags(xdp))) {
if (offset < 0)
return bpf_xdp_frags_shrink_tail(xdp, -offset);
return bpf_xdp_frags_increase_tail(xdp, offset);
}
if (unlikely(data_end > data_hard_end))
return -EINVAL;
if (unlikely(data_end < xdp->data + ETH_HLEN))
return -EINVAL;
if (offset > 0)
memset(xdp->data_end, 0, offset);
xdp->data_end = data_end;
return 0;
}
static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
.func = bpf_xdp_adjust_tail,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
void *meta = xdp->data_meta + offset;
unsigned long metalen = xdp->data - meta;
if (xdp_data_meta_unsupported(xdp))
return -ENOTSUPP;
if (unlikely(meta < xdp_frame_end ||
meta > xdp->data))
return -EINVAL;
if (unlikely(xdp_metalen_invalid(metalen)))
return -EACCES;
xdp->data_meta = meta;
return 0;
}
static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
.func = bpf_xdp_adjust_meta,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
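/* XDP redirect path: bpf_redirect() and bpf_redirect_map() only record the
 * target in the per-CPU bpf_redirect_info; the actual enqueue happens later
 * in xdp_do_redirect() (or xdp_do_generic_redirect() for generic XDP), and
 * drivers typically call xdp_do_flush() at the end of their NAPI poll to
 * kick the devmap/cpumap/xskmap bulk queues.
 */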
void xdp_do_flush(void)
{
__dev_flush();
__cpu_map_flush();
__xsk_map_flush();
}
EXPORT_SYMBOL_GPL(xdp_do_flush);
void bpf_clear_redirect_map(struct bpf_map *map)
{
struct bpf_redirect_info *ri;
int cpu;
for_each_possible_cpu(cpu) {
ri = per_cpu_ptr(&bpf_redirect_info, cpu);
if (unlikely(READ_ONCE(ri->map) == map))
cmpxchg(&ri->map, map, NULL);
}
}
DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
u32 xdp_master_redirect(struct xdp_buff *xdp)
{
struct net_device *master, *slave;
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
if (slave && slave != xdp->rxq->dev) {
ri->tgt_index = slave->ifindex;
ri->map_id = INT_MAX;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
return XDP_REDIRECT;
}
return XDP_TX;
}
EXPORT_SYMBOL_GPL(xdp_master_redirect);
static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
int err;
ri->map_id = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
err = __xsk_map_redirect(fwd, xdp);
if (unlikely(err))
goto err;
_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
return 0;
err:
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
return err;
}
static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
struct net_device *dev,
struct xdp_frame *xdpf,
struct bpf_prog *xdp_prog)
{
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
struct bpf_map *map;
int err;
ri->map_id = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (unlikely(!xdpf)) {
err = -EOVERFLOW;
goto err;
}
switch (map_type) {
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
map = READ_ONCE(ri->map);
if (unlikely(map)) {
WRITE_ONCE(ri->map, NULL);
err = dev_map_enqueue_multi(xdpf, dev, map,
ri->flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_enqueue(fwd, xdpf, dev);
}
break;
case BPF_MAP_TYPE_CPUMAP:
err = cpu_map_enqueue(fwd, xdpf, dev);
break;
case BPF_MAP_TYPE_UNSPEC:
if (map_id == INT_MAX) {
fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
if (unlikely(!fwd)) {
err = -EINVAL;
break;
}
err = dev_xdp_enqueue(fwd, xdpf, dev);
break;
}
fallthrough;
default:
err = -EBADRQC;
}
if (unlikely(err))
goto err;
_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
return 0;
err:
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
return err;
}
int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;
if (map_type == BPF_MAP_TYPE_XSKMAP)
return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
xdp_prog);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;
if (map_type == BPF_MAP_TYPE_XSKMAP)
return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog);
}
EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
struct bpf_prog *xdp_prog,
void *fwd,
enum bpf_map_type map_type, u32 map_id)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map;
int err;
switch (map_type) {
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
case BPF_MAP_TYPE_DEVMAP_HASH:
map = READ_ONCE(ri->map);
if (unlikely(map)) {
WRITE_ONCE(ri->map, NULL);
err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
ri->flags & BPF_F_EXCLUDE_INGRESS);
} else {
err = dev_map_generic_redirect(fwd, skb, xdp_prog);
}
if (unlikely(err))
goto err;
break;
case BPF_MAP_TYPE_XSKMAP:
err = xsk_generic_rcv(fwd, xdp);
if (err)
goto err;
consume_skb(skb);
break;
case BPF_MAP_TYPE_CPUMAP:
err = cpu_map_generic_redirect(fwd, skb);
if (unlikely(err))
goto err;
break;
default:
err = -EBADRQC;
goto err;
}
_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
return 0;
err:
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
return err;
}
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
int err;
ri->map_id = 0;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
if (unlikely(!fwd)) {
err = -EINVAL;
goto err;
}
err = xdp_ok_fwd_dev(fwd, skb->len);
if (unlikely(err))
goto err;
skb->dev = fwd;
_trace_xdp_redirect(dev, xdp_prog, ri->tgt_index);
generic_xdp_tx(skb, xdp_prog);
return 0;
}
return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
err:
_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
return err;
}
BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
if (unlikely(flags))
return XDP_ABORTED;
ri->tgt_index = ifindex;
ri->map_id = INT_MAX;
ri->map_type = BPF_MAP_TYPE_UNSPEC;
return XDP_REDIRECT;
}
static const struct bpf_func_proto bpf_xdp_redirect_proto = {
.func = bpf_xdp_redirect,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u64, key,
u64, flags)
{
return map->ops->map_redirect(map, key, flags);
}
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
.func = bpf_xdp_redirect_map,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
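/* bpf_skb_event_output(): emit a perf event carrying @meta plus up to the
 * first @skb_size bytes of the packet; bpf_skb_copy() pulls the requested
 * slice out of a possibly non-linear skb on demand.
 */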
static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
unsigned long off, unsigned long len)
{
void *ptr = skb_header_pointer(skb, off, len, dst_buff);
if (unlikely(!ptr))
return len;
if (ptr != dst_buff)
memcpy(dst_buff, ptr, len);
return 0;
}
BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
u64, flags, void *, meta, u64, meta_size)
{
u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
return -EINVAL;
if (unlikely(!skb || skb_size > skb->len))
return -EFAULT;
return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
bpf_skb_copy);
}
static const struct bpf_func_proto bpf_skb_event_output_proto = {
.func = bpf_skb_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
const struct bpf_func_proto bpf_skb_output_proto = {
.func = bpf_skb_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_skb_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
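/* Lightweight tunnel metadata helpers. The getters read the ip_tunnel_info
 * attached to the skb; a few shorter, historical layouts of struct
 * bpf_tunnel_key are still accepted and are handled through an on-stack
 * scratch buffer.
 */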
static unsigned short bpf_tunnel_key_af(u64 flags)
{
return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}
BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
u32, size, u64, flags)
{
const struct ip_tunnel_info *info = skb_tunnel_info(skb);
u8 compat[sizeof(struct bpf_tunnel_key)];
void *to_orig = to;
int err;
if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6 |
BPF_F_TUNINFO_FLAGS)))) {
err = -EINVAL;
goto err_clear;
}
if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
err = -EPROTO;
goto err_clear;
}
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
err = -EINVAL;
switch (size) {
case offsetof(struct bpf_tunnel_key, local_ipv6[0]):
case offsetof(struct bpf_tunnel_key, tunnel_label):
case offsetof(struct bpf_tunnel_key, tunnel_ext):
goto set_compat;
case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
if (ip_tunnel_info_af(info) != AF_INET)
goto err_clear;
set_compat:
to = (struct bpf_tunnel_key *)compat;
break;
default:
goto err_clear;
}
}
to->tunnel_id = be64_to_cpu(info->key.tun_id);
to->tunnel_tos = info->key.tos;
to->tunnel_ttl = info->key.ttl;
if (flags & BPF_F_TUNINFO_FLAGS)
to->tunnel_flags = info->key.tun_flags;
else
to->tunnel_ext = 0;
if (flags & BPF_F_TUNINFO_IPV6) {
memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
sizeof(to->remote_ipv6));
memcpy(to->local_ipv6, &info->key.u.ipv6.dst,
sizeof(to->local_ipv6));
to->tunnel_label = be32_to_cpu(info->key.label);
} else {
to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
to->local_ipv4 = be32_to_cpu(info->key.u.ipv4.dst);
memset(&to->local_ipv6[1], 0, sizeof(__u32) * 3);
to->tunnel_label = 0;
}
if (unlikely(size != sizeof(struct bpf_tunnel_key)))
memcpy(to_orig, to, size);
return 0;
err_clear:
memset(to_orig, 0, size);
return err;
}
static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
.func = bpf_skb_get_tunnel_key,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
const struct ip_tunnel_info *info = skb_tunnel_info(skb);
int err;
if (unlikely(!info ||
!(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
err = -ENOENT;
goto err_clear;
}
if (unlikely(size < info->options_len)) {
err = -ENOMEM;
goto err_clear;
}
ip_tunnel_info_opts_get(to, info);
if (size > info->options_len)
memset(to + info->options_len, 0, size - info->options_len);
return info->options_len;
err_clear:
memset(to, 0, size);
return err;
}
static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
.func = bpf_skb_get_tunnel_opt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_CONST_SIZE,
};
static struct metadata_dst __percpu *md_dst;
BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
struct metadata_dst *md = this_cpu_ptr(md_dst);
u8 compat[sizeof(struct bpf_tunnel_key)];
struct ip_tunnel_info *info;
if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER |
BPF_F_NO_TUNNEL_KEY)))
return -EINVAL;
if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
switch (size) {
case offsetof(struct bpf_tunnel_key, local_ipv6[0]):
case offsetof(struct bpf_tunnel_key, tunnel_label):
case offsetof(struct bpf_tunnel_key, tunnel_ext):
case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
memcpy(compat, from, size);
memset(compat + size, 0, sizeof(compat) - size);
from = (const struct bpf_tunnel_key *) compat;
break;
default:
return -EINVAL;
}
}
if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
from->tunnel_ext))
return -EINVAL;
skb_dst_drop(skb);
dst_hold((struct dst_entry *) md);
skb_dst_set(skb, (struct dst_entry *) md);
info = &md->u.tun_info;
memset(info, 0, sizeof(*info));
info->mode = IP_TUNNEL_INFO_TX;
info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
if (flags & BPF_F_DONT_FRAGMENT)
info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
if (flags & BPF_F_ZERO_CSUM_TX)
info->key.tun_flags &= ~TUNNEL_CSUM;
if (flags & BPF_F_SEQ_NUMBER)
info->key.tun_flags |= TUNNEL_SEQ;
if (flags & BPF_F_NO_TUNNEL_KEY)
info->key.tun_flags &= ~TUNNEL_KEY;
info->key.tun_id = cpu_to_be64(from->tunnel_id);
info->key.tos = from->tunnel_tos;
info->key.ttl = from->tunnel_ttl;
if (flags & BPF_F_TUNINFO_IPV6) {
info->mode |= IP_TUNNEL_INFO_IPV6;
memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
sizeof(from->remote_ipv6));
memcpy(&info->key.u.ipv6.src, from->local_ipv6,
sizeof(from->local_ipv6));
info->key.label = cpu_to_be32(from->tunnel_label) &
IPV6_FLOWLABEL_MASK;
} else {
info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
info->key.u.ipv4.src = cpu_to_be32(from->local_ipv4);
info->key.flow_flags = FLOWI_FLAG_ANYSRC;
}
return 0;
}
static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.func = bpf_skb_set_tunnel_key,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
const u8 *, from, u32, size)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
const struct metadata_dst *md = this_cpu_ptr(md_dst);
if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
return -EINVAL;
if (unlikely(size > IP_TUNNEL_OPTS_MAX))
return -ENOMEM;
ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
return 0;
}
static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
.func = bpf_skb_set_tunnel_opt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
if (!md_dst) {
struct metadata_dst __percpu *tmp;
tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
METADATA_IP_TUNNEL,
GFP_KERNEL);
if (!tmp)
return NULL;
if (cmpxchg(&md_dst, NULL, tmp))
metadata_dst_free_percpu(tmp);
}
switch (which) {
case BPF_FUNC_skb_set_tunnel_key:
return &bpf_skb_set_tunnel_key_proto;
case BPF_FUNC_skb_set_tunnel_opt:
return &bpf_skb_set_tunnel_opt_proto;
default:
return NULL;
}
}
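/* bpf_skb_under_cgroup(): return 1 if the socket owning @skb is a descendant
 * of the cgroup stored at @idx in a cgroup array map, 0 if not, or a
 * negative error if no full socket or cgroup is available.
 */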
BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
u32, idx)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct cgroup *cgrp;
struct sock *sk;
sk = skb_to_full_sk(skb);
if (!sk || !sk_fullsock(sk))
return -ENOENT;
if (unlikely(idx >= array->map.max_entries))
return -E2BIG;
cgrp = READ_ONCE(array->ptrs[idx]);
if (unlikely(!cgrp))
return -EAGAIN;
return sk_under_cgroup_hierarchy(sk, cgrp);
}
static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
.func = bpf_skb_under_cgroup,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
};
#ifdef CONFIG_SOCK_CGROUP_DATA
static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
{
struct cgroup *cgrp;
sk = sk_to_full_sk(sk);
if (!sk || !sk_fullsock(sk))
return 0;
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return cgroup_id(cgrp);
}
BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
{
return __bpf_sk_cgroup_id(skb->sk);
}
static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
.func = bpf_skb_cgroup_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
int ancestor_level)
{
struct cgroup *ancestor;
struct cgroup *cgrp;
sk = sk_to_full_sk(sk);
if (!sk || !sk_fullsock(sk))
return 0;
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
ancestor = cgroup_ancestor(cgrp, ancestor_level);
if (!ancestor)
return 0;
return cgroup_id(ancestor);
}
BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
ancestor_level)
{
return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level);
}
static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
.func = bpf_skb_ancestor_cgroup_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk)
{
return __bpf_sk_cgroup_id(sk);
}
static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
.func = bpf_sk_cgroup_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
{
return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
}
static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
.func = bpf_sk_ancestor_cgroup_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
};
#endif
static unsigned long bpf_xdp_copy(void *dst, const void *ctx,
unsigned long off, unsigned long len)
{
struct xdp_buff *xdp = (struct xdp_buff *)ctx;
bpf_xdp_copy_buf(xdp, off, dst, len, false);
return 0;
}
BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
u64, flags, void *, meta, u64, meta_size)
{
u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
return -EINVAL;
if (unlikely(!xdp || xdp_size > xdp_get_buff_len(xdp)))
return -EFAULT;
return bpf_event_output(map, flags, meta, meta_size, xdp,
xdp_size, bpf_xdp_copy);
}
static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.func = bpf_xdp_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
const struct bpf_func_proto bpf_xdp_output_proto = {
.func = bpf_xdp_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &bpf_xdp_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
return skb->sk ? __sock_gen_cookie(skb->sk) : 0;
}
static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
.func = bpf_get_socket_cookie,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
{
return __sock_gen_cookie(ctx->sk);
}
static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
.func = bpf_get_socket_cookie_sock_addr,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
{
return __sock_gen_cookie(ctx);
}
static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
.func = bpf_get_socket_cookie_sock,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
BPF_CALL_1(bpf_get_socket_ptr_cookie, struct sock *, sk)
{
return sk ? sock_gen_cookie(sk) : 0;
}
const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = {
.func = bpf_get_socket_ptr_cookie,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON | PTR_MAYBE_NULL,
};
BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{
return __sock_gen_cookie(ctx->sk);
}
static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
.func = bpf_get_socket_cookie_sock_ops,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
static u64 __bpf_get_netns_cookie(struct sock *sk)
{
const struct net *net = sk ? sock_net(sk) : &init_net;
return net->net_cookie;
}
BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
{
return __bpf_get_netns_cookie(ctx);
}
static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = {
.func = bpf_get_netns_cookie_sock,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX_OR_NULL,
};
BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
{
return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
}
static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = {
.func = bpf_get_netns_cookie_sock_addr,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX_OR_NULL,
};
BPF_CALL_1(bpf_get_netns_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{
return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
}
static const struct bpf_func_proto bpf_get_netns_cookie_sock_ops_proto = {
.func = bpf_get_netns_cookie_sock_ops,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX_OR_NULL,
};
BPF_CALL_1(bpf_get_netns_cookie_sk_msg, struct sk_msg *, ctx)
{
return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
}
static const struct bpf_func_proto bpf_get_netns_cookie_sk_msg_proto = {
.func = bpf_get_netns_cookie_sk_msg,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX_OR_NULL,
};
BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
struct sock *sk = sk_to_full_sk(skb->sk);
kuid_t kuid;
if (!sk || !sk_fullsock(sk))
return overflowuid;
kuid = sock_net_uid(sock_net(sk), sk);
return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}
static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.func = bpf_get_socket_uid,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
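/* Kernel-side {get,set}sockopt for BPF helpers. Only an allow-listed set of
 * SOL_SOCKET, SOL_TCP, SOL_IP and SOL_IPV6 options is accepted, optval and
 * optlen live in kernel memory, and each sol_*_sockopt() below validates the
 * option length before handing off to the protocol's own implementation.
 */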
static int sol_socket_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{
switch (optname) {
case SO_REUSEADDR:
case SO_SNDBUF:
case SO_RCVBUF:
case SO_KEEPALIVE:
case SO_PRIORITY:
case SO_REUSEPORT:
case SO_RCVLOWAT:
case SO_MARK:
case SO_MAX_PACING_RATE:
case SO_BINDTOIFINDEX:
case SO_TXREHASH:
if (*optlen != sizeof(int))
return -EINVAL;
break;
case SO_BINDTODEVICE:
break;
default:
return -EINVAL;
}
if (getopt) {
if (optname == SO_BINDTODEVICE)
return -EINVAL;
return sk_getsockopt(sk, SOL_SOCKET, optname,
KERNEL_SOCKPTR(optval),
KERNEL_SOCKPTR(optlen));
}
return sk_setsockopt(sk, SOL_SOCKET, optname,
KERNEL_SOCKPTR(optval), *optlen);
}
static int bpf_sol_tcp_setsockopt(struct sock *sk, int optname,
char *optval, int optlen)
{
struct tcp_sock *tp = tcp_sk(sk);
unsigned long timeout;
int val;
if (optlen != sizeof(int))
return -EINVAL;
val = *(int *)optval;
switch (optname) {
case TCP_BPF_IW:
if (val <= 0 || tp->data_segs_out > tp->syn_data)
return -EINVAL;
tcp_snd_cwnd_set(tp, val);
break;
case TCP_BPF_SNDCWND_CLAMP:
if (val <= 0)
return -EINVAL;
tp->snd_cwnd_clamp = val;
tp->snd_ssthresh = val;
break;
case TCP_BPF_DELACK_MAX:
timeout = usecs_to_jiffies(val);
if (timeout > TCP_DELACK_MAX ||
timeout < TCP_TIMEOUT_MIN)
return -EINVAL;
inet_csk(sk)->icsk_delack_max = timeout;
break;
case TCP_BPF_RTO_MIN:
timeout = usecs_to_jiffies(val);
if (timeout > TCP_RTO_MIN ||
timeout < TCP_TIMEOUT_MIN)
return -EINVAL;
inet_csk(sk)->icsk_rto_min = timeout;
break;
default:
return -EINVAL;
}
return 0;
}
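/* TCP_CONGESTION needs special handling: on getsockopt the returned cc name
 * is NUL-terminated in place, while setsockopt rejects "cdg" (which
 * allocates from the inet_csk_ca area) and uses bpf_chg_cc_inprogress to
 * keep a congestion control's .init callback from recursively switching cc
 * again.
 */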
static int sol_tcp_sockopt_congestion(struct sock *sk, char *optval,
int *optlen, bool getopt)
{
struct tcp_sock *tp;
int ret;
if (*optlen < 2)
return -EINVAL;
if (getopt) {
if (!inet_csk(sk)->icsk_ca_ops)
return -EINVAL;
optval[--(*optlen)] = '\0';
return do_tcp_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
KERNEL_SOCKPTR(optval),
KERNEL_SOCKPTR(optlen));
}
if (*optlen >= sizeof("cdg") - 1 && !strncmp("cdg", optval, *optlen))
return -ENOTSUPP;
tp = tcp_sk(sk);
if (tp->bpf_chg_cc_inprogress)
return -EBUSY;
tp->bpf_chg_cc_inprogress = 1;
ret = do_tcp_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
KERNEL_SOCKPTR(optval), *optlen);
tp->bpf_chg_cc_inprogress = 0;
return ret;
}
static int sol_tcp_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{
if (sk->sk_protocol != IPPROTO_TCP)
return -EINVAL;
switch (optname) {
case TCP_NODELAY:
case TCP_MAXSEG:
case TCP_KEEPIDLE:
case TCP_KEEPINTVL:
case TCP_KEEPCNT:
case TCP_SYNCNT:
case TCP_WINDOW_CLAMP:
case TCP_THIN_LINEAR_TIMEOUTS:
case TCP_USER_TIMEOUT:
case TCP_NOTSENT_LOWAT:
case TCP_SAVE_SYN:
if (*optlen != sizeof(int))
return -EINVAL;
break;
case TCP_CONGESTION:
return sol_tcp_sockopt_congestion(sk, optval, optlen, getopt);
case TCP_SAVED_SYN:
if (*optlen < 1)
return -EINVAL;
break;
default:
if (getopt)
return -EINVAL;
return bpf_sol_tcp_setsockopt(sk, optname, optval, *optlen);
}
if (getopt) {
if (optname == TCP_SAVED_SYN) {
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->saved_syn ||
*optlen > tcp_saved_syn_len(tp->saved_syn))
return -EINVAL;
memcpy(optval, tp->saved_syn->data, *optlen);
return 0;
}
return do_tcp_getsockopt(sk, SOL_TCP, optname,
KERNEL_SOCKPTR(optval),
KERNEL_SOCKPTR(optlen));
}
return do_tcp_setsockopt(sk, SOL_TCP, optname,
KERNEL_SOCKPTR(optval), *optlen);
}
static int sol_ip_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{
if (sk->sk_family != AF_INET)
return -EINVAL;
switch (optname) {
case IP_TOS:
if (*optlen != sizeof(int))
return -EINVAL;
break;
default:
return -EINVAL;
}
if (getopt)
return do_ip_getsockopt(sk, SOL_IP, optname,
KERNEL_SOCKPTR(optval),
KERNEL_SOCKPTR(optlen));
return do_ip_setsockopt(sk, SOL_IP, optname,
KERNEL_SOCKPTR(optval), *optlen);
}
static int sol_ipv6_sockopt(struct sock *sk, int optname,
char *optval, int *optlen,
bool getopt)
{
if (sk->sk_family != AF_INET6)
return -EINVAL;
switch (optname) {
case IPV6_TCLASS:
case IPV6_AUTOFLOWLABEL:
if (*optlen != sizeof(int))
return -EINVAL;
break;
default:
return -EINVAL;
}
if (getopt)
return ipv6_bpf_stub->ipv6_getsockopt(sk, SOL_IPV6, optname,
KERNEL_SOCKPTR(optval),
KERNEL_SOCKPTR(optlen));
return ipv6_bpf_stub->ipv6_setsockopt(sk, SOL_IPV6, optname,
KERNEL_SOCKPTR(optval), *optlen);
}
static int __bpf_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{
if (!sk_fullsock(sk))
return -EINVAL;
if (level == SOL_SOCKET)
return sol_socket_sockopt(sk, optname, optval, &optlen, false);
else if (IS_ENABLED(CONFIG_INET) && level == SOL_IP)
return sol_ip_sockopt(sk, optname, optval, &optlen, false);
else if (IS_ENABLED(CONFIG_IPV6) && level == SOL_IPV6)
return sol_ipv6_sockopt(sk, optname, optval, &optlen, false);
else if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP)
return sol_tcp_sockopt(sk, optname, optval, &optlen, false);
return -EINVAL;
}
static int _bpf_setsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{
if (sk_fullsock(sk))
sock_owned_by_me(sk);
return __bpf_setsockopt(sk, level, optname, optval, optlen);
}
static int __bpf_getsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{
int err, saved_optlen = optlen;
if (!sk_fullsock(sk)) {
err = -EINVAL;
goto done;
}
if (level == SOL_SOCKET)
err = sol_socket_sockopt(sk, optname, optval, &optlen, true);
else if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP)
err = sol_tcp_sockopt(sk, optname, optval, &optlen, true);
else if (IS_ENABLED(CONFIG_INET) && level == SOL_IP)
err = sol_ip_sockopt(sk, optname, optval, &optlen, true);
else if (IS_ENABLED(CONFIG_IPV6) && level == SOL_IPV6)
err = sol_ipv6_sockopt(sk, optname, optval, &optlen, true);
else
err = -EINVAL;
done:
if (err)
optlen = 0;
if (optlen < saved_optlen)
memset(optval + optlen, 0, saved_optlen - optlen);
return err;
}
static int _bpf_getsockopt(struct sock *sk, int level, int optname,
char *optval, int optlen)
{
if (sk_fullsock(sk))
sock_owned_by_me(sk);
return __bpf_getsockopt(sk, level, optname, optval, optlen);
}
BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
return _bpf_setsockopt(sk, level, optname, optval, optlen);
}
const struct bpf_func_proto bpf_sk_setsockopt_proto = {
.func = bpf_sk_setsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_sk_getsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
return _bpf_getsockopt(sk, level, optname, optval, optlen);
}
const struct bpf_func_proto bpf_sk_getsockopt_proto = {
.func = bpf_sk_getsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_UNINIT_MEM,
.arg5_type = ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_unlocked_sk_setsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
return __bpf_setsockopt(sk, level, optname, optval, optlen);
}
const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto = {
.func = bpf_unlocked_sk_setsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_unlocked_sk_getsockopt, struct sock *, sk, int, level,
int, optname, char *, optval, int, optlen)
{
return __bpf_getsockopt(sk, level, optname, optval, optlen);
}
const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto = {
.func = bpf_unlocked_sk_getsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_UNINIT_MEM,
.arg5_type = ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{
return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
.func = bpf_sock_addr_setsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{
return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
.func = bpf_sock_addr_getsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_UNINIT_MEM,
.arg5_type = ARG_CONST_SIZE,
};
BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
.func = bpf_sock_ops_setsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
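/* bpf_sock_ops_get_syn(): hand back a pointer to, and the length of, the
 * headers of the SYN packet that created this connection, starting at the
 * TCP, IP or MAC header depending on @optname. The data comes from the
 * still-queued syn_skb when available, otherwise from the saved_syn copy
 * kept on the request or full socket.
 */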
static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock,
int optname, const u8 **start)
{
struct sk_buff *syn_skb = bpf_sock->syn_skb;
const u8 *hdr_start;
int ret;
if (syn_skb) {
if (optname == TCP_BPF_SYN) {
hdr_start = syn_skb->data;
ret = tcp_hdrlen(syn_skb);
} else if (optname == TCP_BPF_SYN_IP) {
hdr_start = skb_network_header(syn_skb);
ret = skb_network_header_len(syn_skb) +
tcp_hdrlen(syn_skb);
} else {
hdr_start = skb_mac_header(syn_skb);
ret = skb_mac_header_len(syn_skb) +
skb_network_header_len(syn_skb) +
tcp_hdrlen(syn_skb);
}
} else {
struct sock *sk = bpf_sock->sk;
struct saved_syn *saved_syn;
if (sk->sk_state == TCP_NEW_SYN_RECV)
saved_syn = inet_reqsk(sk)->saved_syn;
else
saved_syn = tcp_sk(sk)->saved_syn;
if (!saved_syn)
return -ENOENT;
if (optname == TCP_BPF_SYN) {
hdr_start = saved_syn->data +
saved_syn->mac_hdrlen +
saved_syn->network_hdrlen;
ret = saved_syn->tcp_hdrlen;
} else if (optname == TCP_BPF_SYN_IP) {
hdr_start = saved_syn->data +
saved_syn->mac_hdrlen;
ret = saved_syn->network_hdrlen +
saved_syn->tcp_hdrlen;
} else {
if (!saved_syn->mac_hdrlen)
return -ENOENT;
hdr_start = saved_syn->data;
ret = saved_syn->mac_hdrlen +
saved_syn->network_hdrlen +
saved_syn->tcp_hdrlen;
}
}
*start = hdr_start;
return ret;
}
BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
int ret, copy_len = 0;
const u8 *start;
ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start);
if (ret > 0) {
copy_len = ret;
if (optlen < copy_len) {
copy_len = optlen;
ret = -ENOSPC;
}
memcpy(optval, start, copy_len);
}
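		/* Zero out the unused tail of the user-supplied buffer */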
memset(optval + copy_len, 0, optlen - copy_len);
return ret;
}
return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = {
.func = bpf_sock_ops_getsockopt,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_UNINIT_MEM,
.arg5_type = ARG_CONST_SIZE,
};
BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
int, argval)
{
struct sock *sk = bpf_sock->sk;
int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
return -EINVAL;
tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
}
static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
.func = bpf_sock_ops_cb_flags_set,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
};
const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
int, addr_len)
{
#ifdef CONFIG_INET
struct sock *sk = ctx->sk;
u32 flags = BIND_FROM_BPF;
int err;
err = -EINVAL;
if (addr_len < offsetofend(struct sockaddr, sa_family))
return err;
if (addr->sa_family == AF_INET) {
if (addr_len < sizeof(struct sockaddr_in))
return err;
if (((struct sockaddr_in *)addr)->sin_port == htons(0))
flags |= BIND_FORCE_ADDRESS_NO_PORT;
return __inet_bind(sk, addr, addr_len, flags);
#if IS_ENABLED(CONFIG_IPV6)
} else if (addr->sa_family == AF_INET6) {
if (addr_len < SIN6_LEN_RFC2133)
return err;
if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0))
flags |= BIND_FORCE_ADDRESS_NO_PORT;
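		/* An AF_INET6 socket implies the ipv6 module is loaded, so
		 * ipv6_bpf_stub cannot be NULL here.
		 */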
return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags);
#endif /* CONFIG_IPV6 */
}
#endif /* CONFIG_INET */
return -EAFNOSUPPORT;
}
static const struct bpf_func_proto bpf_bind_proto = {
.func = bpf_bind,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};
#ifdef CONFIG_XFRM
#if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
(IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
struct metadata_dst __percpu *xfrm_bpf_md_dst;
EXPORT_SYMBOL_GPL(xfrm_bpf_md_dst);
#endif
BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
struct bpf_xfrm_state *, to, u32, size, u64, flags)
{
const struct sec_path *sp = skb_sec_path(skb);
const struct xfrm_state *x;
if (!sp || unlikely(index >= sp->len || flags))
goto err_clear;
x = sp->xvec[index];
if (unlikely(size != sizeof(struct bpf_xfrm_state)))
goto err_clear;
to->reqid = x->props.reqid;
to->spi = x->id.spi;
to->family = x->props.family;
to->ext = 0;
if (to->family == AF_INET6) {
memcpy(to->remote_ipv6, x->props.saddr.a6,
sizeof(to->remote_ipv6));
} else {
to->remote_ipv4 = x->props.saddr.a4;
memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
}
return 0;
err_clear:
memset(to, 0, size);
return -EINVAL;
}
static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
.func = bpf_skb_get_xfrm_state,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
.arg5_type = ARG_ANYTHING,
};
#endif
#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
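/* Common tail of the fib lookup helpers: clear the VLAN output fields and
 * report the discovered MTU (mtu_result shares storage with tot_len) when
 * one was computed.
 */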
static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, u32 mtu)
{
params->h_vlan_TCI = 0;
params->h_vlan_proto = 0;
if (mtu)
params->mtu_result = mtu;
return 0;
}
#endif
#if IS_ENABLED(CONFIG_INET)
static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
u32 flags, bool check_mtu)
{
struct fib_nh_common *nhc;
struct in_device *in_dev;
struct neighbour *neigh;
struct net_device *dev;
struct fib_result res;
struct flowi4 fl4;
u32 mtu = 0;
int err;
dev = dev_get_by_index_rcu(net, params->ifindex);
if (unlikely(!dev))
return -ENODEV;
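	/* verify forwarding is enabled on this interface */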
in_dev = __in_dev_get_rcu(dev);
if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
fl4.flowi4_iif = 1;
fl4.flowi4_oif = params->ifindex;
} else {
fl4.flowi4_iif = params->ifindex;
fl4.flowi4_oif = 0;
}
fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_flags = 0;
fl4.flowi4_proto = params->l4_protocol;
fl4.daddr = params->ipv4_dst;
fl4.saddr = params->ipv4_src;
fl4.fl4_sport = params->sport;
fl4.fl4_dport = params->dport;
fl4.flowi4_multipath_hash = 0;
if (flags & BPF_FIB_LOOKUP_DIRECT) {
u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
struct fib_table *tb;
if (flags & BPF_FIB_LOOKUP_TBID) {
tbid = params->tbid;
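			/* zero out for vlan output */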
params->tbid = 0;
}
tb = fib_get_table(net, tbid);
if (unlikely(!tb))
return BPF_FIB_LKUP_RET_NOT_FWDED;
err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
} else {
fl4.flowi4_mark = 0;
fl4.flowi4_secid = 0;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_uid = sock_net_uid(net, NULL);
err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
}
if (err) {
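		/* map fib lookup errors to RTN_ type */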
if (err == -EINVAL)
return BPF_FIB_LKUP_RET_BLACKHOLE;
if (err == -EHOSTUNREACH)
return BPF_FIB_LKUP_RET_UNREACHABLE;
if (err == -EACCES)
return BPF_FIB_LKUP_RET_PROHIBIT;
return BPF_FIB_LKUP_RET_NOT_FWDED;
}
if (res.type != RTN_UNICAST)
return BPF_FIB_LKUP_RET_NOT_FWDED;
if (fib_info_num_path(res.fi) > 1)
fib_select_path(net, &res, &fl4, NULL);
if (check_mtu) {
mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
if (params->tot_len > mtu) {
params->mtu_result = mtu;
return BPF_FIB_LKUP_RET_FRAG_NEEDED;
}
}
nhc = res.nhc;
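	/* lwt encapsulation is not handled here */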
if (nhc->nhc_lwtstate)
return BPF_FIB_LKUP_RET_UNSUPP_LWT;
dev = nhc->nhc_dev;
params->rt_metric = res.fi->fib_priority;
params->ifindex = dev->ifindex;
if (likely(nhc->nhc_gw_family != AF_INET6)) {
if (nhc->nhc_gw_family)
params->ipv4_dst = nhc->nhc_gw.ipv4;
} else {
struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
params->family = AF_INET6;
*dst = nhc->nhc_gw.ipv6;
}
if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
goto set_fwd_params;
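	/* xdp and cls_bpf programs run under RCU-bh, so an extra
	 * rcu_read_lock_bh() is not needed for the neighbour lookup below.
	 */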
if (likely(nhc->nhc_gw_family != AF_INET6))
neigh = __ipv4_neigh_lookup_noref(dev,
(__force u32)params->ipv4_dst);
else
neigh = __ipv6_neigh_lookup_noref_stub(dev, params->ipv6_dst);
if (!neigh || !(READ_ONCE(neigh->nud_state) & NUD_VALID))
return BPF_FIB_LKUP_RET_NO_NEIGH;
memcpy(params->dmac, neigh->ha, ETH_ALEN);
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
set_fwd_params:
return bpf_fib_set_fwd_params(params, mtu);
}
#endif
#if IS_ENABLED(CONFIG_IPV6)
static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
u32 flags, bool check_mtu)
{
struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
struct fib6_result res = {};
struct neighbour *neigh;
struct net_device *dev;
struct inet6_dev *idev;
struct flowi6 fl6;
int strict = 0;
int oif, err;
u32 mtu = 0;
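	/* link local addresses are never forwarded */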
if (rt6_need_strict(dst) || rt6_need_strict(src))
return BPF_FIB_LKUP_RET_NOT_FWDED;
dev = dev_get_by_index_rcu(net, params->ifindex);
if (unlikely(!dev))
return -ENODEV;
idev = __in6_dev_get_safely(dev);
if (unlikely(!idev || !idev->cnf.forwarding))
return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
fl6.flowi6_iif = 1;
oif = fl6.flowi6_oif = params->ifindex;
} else {
oif = fl6.flowi6_iif = params->ifindex;
fl6.flowi6_oif = 0;
strict = RT6_LOOKUP_F_HAS_SADDR;
}
fl6.flowlabel = params->flowinfo;
fl6.flowi6_scope = 0;
fl6.flowi6_flags = 0;
fl6.mp_hash = 0;
fl6.flowi6_proto = params->l4_protocol;
fl6.daddr = *dst;
fl6.saddr = *src;
fl6.fl6_sport = params->sport;
fl6.fl6_dport = params->dport;
if (flags & BPF_FIB_LOOKUP_DIRECT) {
u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
struct fib6_table *tb;
if (flags & BPF_FIB_LOOKUP_TBID) {
tbid = params->tbid;
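			/* zero out for vlan output */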
params->tbid = 0;
}
tb = ipv6_stub->fib6_get_table(net, tbid);
if (unlikely(!tb))
return BPF_FIB_LKUP_RET_NOT_FWDED;
err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
strict);
} else {
fl6.flowi6_mark = 0;
fl6.flowi6_secid = 0;
fl6.flowi6_tun_key.tun_id = 0;
fl6.flowi6_uid = sock_net_uid(net, NULL);
err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
}
if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
res.f6i == net->ipv6.fib6_null_entry))
return BPF_FIB_LKUP_RET_NOT_FWDED;
switch (res.fib6_type) {
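	/* only unicast is forwarded without specific rules */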
case RTN_UNICAST:
break;
case RTN_BLACKHOLE:
return BPF_FIB_LKUP_RET_BLACKHOLE;
case RTN_UNREACHABLE:
return BPF_FIB_LKUP_RET_UNREACHABLE;
case RTN_PROHIBIT:
return BPF_FIB_LKUP_RET_PROHIBIT;
default:
return BPF_FIB_LKUP_RET_NOT_FWDED;
}
ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
fl6.flowi6_oif != 0, NULL, strict);
if (check_mtu) {
mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
if (params->tot_len > mtu) {
params->mtu_result = mtu;
return BPF_FIB_LKUP_RET_FRAG_NEEDED;
}
}
if (res.nh->fib_nh_lws)
return BPF_FIB_LKUP_RET_UNSUPP_LWT;
if (res.nh->fib_nh_gw_family)
*dst = res.nh->fib_nh_gw6;
dev = res.nh->fib_nh_dev;
params->rt_metric = res.f6i->fib6_metric;
params->ifindex = dev->ifindex;
if (flags & BPF_FIB_LOOKUP_SKIP_NEIGH)
goto set_fwd_params;
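	/* xdp and cls_bpf programs run under RCU-bh, so an extra
	 * rcu_read_lock_bh() is not needed for the neighbour lookup below.
	 */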
neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
if (!neigh || !(READ_ONCE(neigh->nud_state) & NUD_VALID))
return BPF_FIB_LKUP_RET_NO_NEIGH;
memcpy(params->dmac, neigh->ha, ETH_ALEN);
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
set_fwd_params:
return bpf_fib_set_fwd_params(params, mtu);
}
#endif
#define BPF_FIB_LOOKUP_MASK (BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT | \
BPF_FIB_LOOKUP_SKIP_NEIGH | BPF_FIB_LOOKUP_TBID)
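/* Illustrative usage sketch (not part of this file): a BPF program would
 * typically fill struct bpf_fib_lookup with the packet's addresses,
 * protocol and ifindex, then call
 *	rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
 * and, when rc is BPF_FIB_LKUP_RET_SUCCESS, redirect to params.ifindex
 * using the returned dmac/smac.
 */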
BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
struct bpf_fib_lookup *, params, int, plen, u32, flags)
{
if (plen < sizeof(*params))
return -EINVAL;
if (flags & ~BPF_FIB_LOOKUP_MASK)
return -EINVAL;
switch (params->family) {
#if IS_ENABLED(CONFIG_INET)
case AF_INET:
return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
flags, true);
#endif
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
flags, true);
#endif
}
return -EAFNOSUPPORT;
}
static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
.func = bpf_xdp_fib_lookup,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
struct bpf_fib_lookup *, params, int, plen, u32, flags)
{
struct net *net = dev_net(skb->dev);
int rc = -EAFNOSUPPORT;
bool check_mtu = false;
if (plen < sizeof(*params))
return -EINVAL;
if (flags & ~BPF_FIB_LOOKUP_MASK)
return -EINVAL;
if (params->tot_len)
check_mtu = true;
switch (params->family) {
#if IS_ENABLED(CONFIG_INET)
case AF_INET:
rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu);
break;
#endif
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu);
break;
#endif
}
if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) {
struct net_device *dev;
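		/* When tot_len isn't provided by the user, check the skb
		 * against the MTU of the net_device found by the FIB lookup.
		 */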
dev = dev_get_by_index_rcu(net, params->ifindex);
if (!is_skb_forwardable(dev, skb))
rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
params->mtu_result = dev->mtu;
}
return rc;
}
static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
.func = bpf_skb_fib_lookup,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
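/* Resolve the target device for the MTU check: ifindex 0 means keep using
 * the current device, otherwise look the ifindex up (under RCU) in the
 * current netns.
 */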
static struct net_device *__dev_via_ifindex(struct net_device *dev_curr,
u32 ifindex)
{
struct net *netns = dev_net(dev_curr);
if (ifindex == 0)
return dev_curr;
return dev_get_by_index_rcu(netns, ifindex);
}
BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags)
{
int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
struct net_device *dev = skb->dev;
int skb_len, dev_len;
int mtu;
if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
return -EINVAL;
if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
return -EINVAL;
dev = __dev_via_ifindex(dev, ifindex);
if (unlikely(!dev))
return -ENODEV;
mtu = READ_ONCE(dev->mtu);
dev_len = mtu + dev->hard_header_len;
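	/* If *mtu_len is set, use it as the L3 length (like fib_lookup's
	 * tot_len) instead of the full skb->len.
	 */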
skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len;
skb_len += len_diff;
if (skb_len <= dev_len) {
ret = BPF_MTU_CHK_RET_SUCCESS;
goto out;
}
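	/* skb->len exceeds the device MTU here, but it covers all GSO
	 * segments, so the individual segments may still fit.  The skb can
	 * be re-segmented on transmit (see validate_xmit_skb()), so the
	 * caller chooses via BPF_MTU_CHK_SEGS whether segments are checked.
	 */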
if (skb_is_gso(skb)) {
ret = BPF_MTU_CHK_RET_SUCCESS;
if (flags & BPF_MTU_CHK_SEGS &&
!skb_gso_validate_network_len(skb, mtu))
ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
}
out:
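	/* The BPF verifier guarantees mtu_len points to valid memory */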
*mtu_len = mtu;
return ret;
}
BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags)
{
struct net_device *dev = xdp->rxq->dev;
int xdp_len = xdp->data_end - xdp->data;
int ret = BPF_MTU_CHK_RET_SUCCESS;
int mtu, dev_len;
if (unlikely(flags))
return -EINVAL;
dev = __dev_via_ifindex(dev, ifindex);
if (unlikely(!dev))
return -ENODEV;
mtu = READ_ONCE(dev->mtu);
dev_len = mtu + dev->hard_header_len;
if (*mtu_len)
xdp_len = *mtu_len + dev->hard_header_len;
xdp_len += len_diff;
if (xdp_len > dev_len)
ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
*mtu_len = mtu;
return ret;
}
static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
.func = bpf_skb_check_mtu,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type =