#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/tcp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
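
/* Frob the payload of a (linear) skb in place: replace match_len bytes at
 * match_offset with rep_buffer/rep_len, shifting the rest of the packet and
 * fixing up skb->len and the IPv4/IPv6 length fields accordingly.
 */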
static void mangle_contents(struct sk_buff *skb,
                            unsigned int dataoff,
                            unsigned int match_offset,
                            unsigned int match_len,
                            const char *rep_buffer,
                            unsigned int rep_len)
{
        unsigned char *data;

        SKB_LINEAR_ASSERT(skb);
        data = skb_network_header(skb) + dataoff;

        /* move post-replacement */
        memmove(data + match_offset + rep_len,
                data + match_offset + match_len,
                skb_tail_pointer(skb) - (skb_network_header(skb) + dataoff +
                                         match_offset + match_len));

        /* insert data from buffer */
        memcpy(data + match_offset, rep_buffer, rep_len);

        /* update skb info */
        if (rep_len > match_len) {
                pr_debug("nf_nat_mangle_packet: Extending packet by %u from %u bytes\n",
                         rep_len - match_len, skb->len);
                skb_put(skb, rep_len - match_len);
        } else {
                pr_debug("nf_nat_mangle_packet: Shrinking packet by %u from %u bytes\n",
                         match_len - rep_len, skb->len);
                __skb_trim(skb, skb->len + rep_len - match_len);
        }

        if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) {
                /* fix IP hdr checksum information */
                ip_hdr(skb)->tot_len = htons(skb->len);
                ip_send_check(ip_hdr(skb));
        } else {
                ipv6_hdr(skb)->payload_len =
                        htons(skb->len - sizeof(struct ipv6hdr));
        }
}
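
/* Make room for "extra" more bytes in the skb's tail; the total packet length
 * must still fit into a 16-bit IP length field (65535 bytes).
 */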
static bool enlarge_skb(struct sk_buff *skb, unsigned int extra)
{
        if (skb->len + extra > 65535)
                return false;

        if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
                return false;

        return true;
}
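
/* Generic routine for mangling variable-length address/port payloads inside a
 * NATed TCP connection (e.g. the address strings carried by FTP, IRC or SIP).
 * It recalculates the TCP checksum and, when "adjust" is set and the payload
 * length changed, records a sequence-number offset so that later packets can
 * be fixed up by the conntrack seqadj code.
 */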
bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                struct nf_conn *ct,
                                enum ip_conntrack_info ctinfo,
                                unsigned int protoff,
                                unsigned int match_offset,
                                unsigned int match_len,
                                const char *rep_buffer,
                                unsigned int rep_len, bool adjust)
{
        struct tcphdr *tcph;
        int oldlen, datalen;

        if (skb_ensure_writable(skb, skb->len))
                return false;

        if (rep_len > match_len &&
            rep_len - match_len > skb_tailroom(skb) &&
            !enlarge_skb(skb, rep_len - match_len))
                return false;

        tcph = (void *)skb->data + protoff;

        oldlen = skb->len - protoff;
        mangle_contents(skb, protoff + tcph->doff*4,
                        match_offset, match_len, rep_buffer, rep_len);

        datalen = skb->len - protoff;
        nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_TCP,
                           tcph, &tcph->check, datalen, oldlen);

        if (adjust && rep_len != match_len)
                nf_ct_seqadj_set(ct, ctinfo, tcph->seq,
                                 (int)rep_len - (int)match_len);

        return true;
}
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
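
/* Illustrative sketch (not part of this file): a conntrack/NAT helper would
 * typically call the nf_nat_mangle_tcp_packet() wrapper from
 * <net/netfilter/nf_nat_helper.h>, which invokes the function above with
 * adjust == true. newip (__be32), port, matchoff and matchlen below are
 * placeholders supplied by the helper's own parsing code:
 *
 *	char addrbuf[sizeof("255.255.255.255:65535")];
 *	unsigned int buflen;
 *
 *	buflen = snprintf(addrbuf, sizeof(addrbuf), "%pI4:%u", &newip, port);
 *	if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
 *				      matchoff, matchlen, addrbuf, buflen))
 *		return NF_DROP;
 */

/* Same idea as __nf_nat_mangle_tcp_packet(), but for UDP: there are no
 * sequence numbers to adjust, only the UDP length and (optional) checksum
 * need updating.
 */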
bool
nf_nat_mangle_udp_packet(struct sk_buff *skb,
                         struct nf_conn *ct,
                         enum ip_conntrack_info ctinfo,
                         unsigned int protoff,
                         unsigned int match_offset,
                         unsigned int match_len,
                         const char *rep_buffer,
                         unsigned int rep_len)
{
        struct udphdr *udph;
        int datalen, oldlen;

        if (skb_ensure_writable(skb, skb->len))
                return false;

        if (rep_len > match_len &&
            rep_len - match_len > skb_tailroom(skb) &&
            !enlarge_skb(skb, rep_len - match_len))
                return false;

        udph = (void *)skb->data + protoff;

        oldlen = skb->len - protoff;
        mangle_contents(skb, protoff + sizeof(*udph),
                        match_offset, match_len, rep_buffer, rep_len);

        /* update the length of the UDP packet */
        datalen = skb->len - protoff;
        udph->len = htons(datalen);

        /* fix the UDP checksum only if one was in use */
        if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
                return true;

        nf_nat_csum_recalc(skb, nf_ct_l3num(ct), IPPROTO_UDP,
                           udph, &udph->check, datalen, oldlen);

        return true;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
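
/* Set up NAT on this expected conntrack so that it follows its master: the
 * source is mapped to where the master sends to, the destination to the
 * master's source, with the destination port forced to the one saved in the
 * expectation. If no free NAT slot is found, the packet is dropped on confirm.
 */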
void nf_nat_follow_master(struct nf_conn *ct,
                          struct nf_conntrack_expect *exp)
{
        struct nf_nat_range2 range;

        /* This must be a fresh conntrack that has not been NATed yet. */
        BUG_ON(ct->status & IPS_NAT_DONE_MASK);

        /* Change src to where master sends to */
        range.flags = NF_NAT_RANGE_MAP_IPS;
        range.min_addr = range.max_addr
                = ct->master->tuplehash[!exp->dir].tuple.dst.u3;
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);

        /* For DST manip, map the port to where it is expected. */
        range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
        range.min_proto = range.max_proto = exp->saved_proto;
        range.min_addr = range.max_addr
                = ct->master->tuplehash[!exp->dir].tuple.src.u3;
        nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
}
EXPORT_SYMBOL(nf_nat_follow_master);
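
/* Try to register the expectation on the requested port; if that port is
 * already busy, retry a bounded number of times with a random port at or
 * above the requested one. Returns the reserved port, or 0 on failure.
 */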
u16 nf_nat_exp_find_port(struct nf_conntrack_expect *exp, u16 port)
{
        static const unsigned int max_attempts = 128;
        int range, attempts_left;
        u16 min = port;

        range = USHRT_MAX - port;
        attempts_left = range;

        if (attempts_left > max_attempts)
                attempts_left = max_attempts;

        for (;;) {
                int res;

                exp->tuple.dst.u.tcp.port = htons(port);
                res = nf_ct_expect_related(exp, 0);
                if (res == 0)
                        return port;

                if (res != -EBUSY || (--attempts_left < 0))
                        break;

                port = min + get_random_u32_below(range);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nf_nat_exp_find_port);
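
/* Illustrative sketch (not part of this file): a NAT helper would typically
 * wire nf_nat_exp_find_port() and nf_nat_follow_master() together when it
 * sees a new expectation, roughly along the lines of the FTP/IRC NAT helpers;
 * "exp", "dir" and "port" are placeholders from the helper's own context:
 *
 *	exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port;
 *	exp->dir = !dir;
 *	exp->expectfn = nf_nat_follow_master;
 *
 *	port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port));
 *	if (port == 0)
 *		return NF_DROP;
 *
 * The helper then mangles the packet payload so the peer connects to the
 * reserved port.
 */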