#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <net/tcp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
static const char *const tcp_conntrack_names[] = {
"NONE",
"SYN_SENT",
"SYN_RECV",
"ESTABLISHED",
"FIN_WAIT",
"CLOSE_WAIT",
"LAST_ACK",
"TIME_WAIT",
"CLOSE",
"SYN_SENT2",
};
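/* Verdicts from tcp_in_window(): IGNORE lets the packet through without
 * advancing the state machine, INVALID flags it (subject to liberal mode),
 * ACCEPT admits it as in-window.
 */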
enum nf_ct_tcp_action {
NFCT_TCP_IGNORE,
NFCT_TCP_INVALID,
NFCT_TCP_ACCEPT,
};
#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
	[TCP_CONNTRACK_TIME_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE]		= 10 SECS,
	[TCP_CONNTRACK_SYN_SENT2]	= 2 MINS,
	/* RFC1122 says the R2 limit should be at least 100 seconds.
	 * Linux uses 15 packets as limit, which corresponds
	 * to ~13-30min depending on RTO.
	 */
	[TCP_CONNTRACK_RETRANS]		= 5 MINS,
	[TCP_CONNTRACK_UNACK]		= 5 MINS,
};
#define sNO TCP_CONNTRACK_NONE
#define sSS TCP_CONNTRACK_SYN_SENT
#define sSR TCP_CONNTRACK_SYN_RECV
#define sES TCP_CONNTRACK_ESTABLISHED
#define sFW TCP_CONNTRACK_FIN_WAIT
#define sCW TCP_CONNTRACK_CLOSE_WAIT
#define sLA TCP_CONNTRACK_LAST_ACK
#define sTW TCP_CONNTRACK_TIME_WAIT
#define sCL TCP_CONNTRACK_CLOSE
#define sS2 TCP_CONNTRACK_SYN_SENT2
#define sIV TCP_CONNTRACK_MAX
#define sIG TCP_CONNTRACK_IGNORE
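/* What TCP flags are set from RST/SYN/FIN/ACK. */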
enum tcp_bit_set {
TCP_SYN_SET,
TCP_SYNACK_SET,
TCP_FIN_SET,
TCP_ACK_SET,
TCP_RST_SET,
TCP_NONE_SET,
};
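/*
 * The TCP state transition table needs a few words.
 *
 * We are the man in the middle. All the packets go through us but might
 * get lost in transit to the destination. It is assumed that the
 * destinations can't receive segments we haven't seen.
 *
 * The checked segment is in window, but our windows are *not* equivalent
 * with the ones of the sender/receiver. We always try to guess the state
 * of the current sender.
 *
 * The meaning of the states are:
 *
 * NONE:	initial state
 * SYN_SENT:	SYN-only packet seen
 * SYN_SENT2:	SYN-only packet seen from reply dir, simultaneous open
 * SYN_RECV:	SYN-ACK packet seen
 * ESTABLISHED:	ACK packet seen
 * FIN_WAIT:	FIN packet seen
 * CLOSE_WAIT:	ACK seen (after FIN)
 * LAST_ACK:	FIN seen (after FIN)
 * TIME_WAIT:	last ACK seen
 * CLOSE:	closed connection (RST)
 *
 * Packets marked as IGNORED (sIG) may be either invalid or valid and the
 * receiver may send back a connection closing RST or a SYN/ACK.
 *
 * Packets marked as INVALID (sIV) are regarded as truly invalid.
 */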
static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
	{
/* ORIGINAL */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	},
	{
/* REPLY */
/*	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*ack*/	   { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	}
};
#ifdef CONFIG_NF_CONNTRACK_PROCFS
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
return;
seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
}
#endif
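/* Map the flag combination of a TCP header to a row of tcp_conntracks[][]. */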
static unsigned int get_conntrack_index(const struct tcphdr *tcph)
{
if (tcph->rst) return TCP_RST_SET;
else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
else if (tcph->fin) return TCP_FIN_SET;
else if (tcph->ack) return TCP_ACK_SET;
else return TCP_NONE_SET;
}
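/* Right edge of the segment: seq plus payload length, where SYN and FIN
 * each consume one sequence number.
 */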
static inline __u32 segment_seq_plus_len(__u32 seq,
size_t len,
unsigned int dataoff,
const struct tcphdr *tcph)
{
return (seq + len - dataoff - tcph->doff*4
+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
}
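/* How far behind the tracked right edge an ACK may lag before it is
 * considered overly delayed: at least MAXACKWINCONST, or the sender's
 * maximal window if that is larger.
 */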
#define MAXACKWINCONST 66000
#define MAXACKWINDOW(sender) \
((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
: MAXACKWINCONST)
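/*
 * Simplified tcp_parse_options routine from tcp_input.c
 */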
static void tcp_options(const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
struct ip_ct_tcp_state *state)
{
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
const unsigned char *ptr;
int length = (tcph->doff*4) - sizeof(struct tcphdr);
if (!length)
return;
ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
length, buff);
if (!ptr)
return;
state->td_scale = 0;
state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
while (length > 0) {
		int opcode = *ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP:
length--;
continue;
default:
if (length < 2)
return;
			opsize = *ptr++;
if (opsize < 2)
return;
if (opsize > length)
return;
if (opcode == TCPOPT_SACK_PERM
&& opsize == TCPOLEN_SACK_PERM)
state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
else if (opcode == TCPOPT_WINDOW
&& opsize == TCPOLEN_WINDOW) {
state->td_scale = *(u_int8_t *)ptr;
if (state->td_scale > TCP_MAX_WSCALE)
state->td_scale = TCP_MAX_WSCALE;
state->flags |=
IP_CT_TCP_FLAG_WINDOW_SCALE;
}
ptr += opsize - 2;
length -= opsize;
}
}
}
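/* Scan the options for SACK blocks and report the highest right edge seen;
 * the initial test short-circuits the common timestamp-only option block.
 */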
static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
const struct tcphdr *tcph, __u32 *sack)
{
unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
const unsigned char *ptr;
int length = (tcph->doff*4) - sizeof(struct tcphdr);
__u32 tmp;
if (!length)
return;
ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
length, buff);
if (!ptr)
return;
if (length == TCPOLEN_TSTAMP_ALIGNED
&& *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
| (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8)
| TCPOLEN_TIMESTAMP))
return;
while (length > 0) {
int opcode = *ptr++;
int opsize, i;
switch (opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP:
length--;
continue;
default:
if (length < 2)
return;
opsize = *ptr++;
if (opsize < 2)
return;
if (opsize > length)
return;
if (opcode == TCPOPT_SACK
&& opsize >= (TCPOLEN_SACK_BASE
+ TCPOLEN_SACK_PERBLOCK)
&& !((opsize - TCPOLEN_SACK_BASE)
% TCPOLEN_SACK_PERBLOCK)) {
for (i = 0;
i < (opsize - TCPOLEN_SACK_BASE);
i += TCPOLEN_SACK_PERBLOCK) {
tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
if (after(tmp, *sack))
*sack = tmp;
}
return;
}
ptr += opsize - 2;
length -= opsize;
}
}
}
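/* (Re)initialize the sending direction from a SYN segment. Window scaling
 * remains in effect only if both directions announced it.
 */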
static void tcp_init_sender(struct ip_ct_tcp_state *sender,
struct ip_ct_tcp_state *receiver,
const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *tcph,
u32 end, u32 win)
{
sender->td_end =
sender->td_maxend = end;
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
sender->td_scale = 0;
receiver->td_scale = 0;
}
}
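/* Log an invalid segment and return the proposed verdict, unless this
 * direction is marked liberal or nf_conntrack_tcp_be_liberal is set, in
 * which case the segment is accepted outright.
 */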
__printf(6, 7)
static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
const struct nf_conn *ct,
const struct nf_hook_state *state,
const struct ip_ct_tcp_state *sender,
enum nf_ct_tcp_action ret,
const char *fmt, ...)
{
const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
struct va_format vaf;
va_list args;
bool be_liberal;
be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
if (be_liberal)
return NFCT_TCP_ACCEPT;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
va_end(args);
return ret;
}
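/*
 * TCP window tracking based on 'Real Stateful TCP Packet Filtering
 * in IP Filter' by Guido van Rooij.
 *
 * The boundaries and conditions are adjusted per RFC793: the packet
 * must intersect the window (i.e. segments may be after the right or
 * before the left edge), and thus receivers may ACK segments after
 * the right edge of the window.
 *
 *	td_maxend = max(sack + max(win, 1)) seen in reply packets
 *	td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
 *	td_maxwin += seq + len - sender.td_maxend
 *			if seq + len > sender.td_maxend
 *	td_end    = max(seq + len) seen in sent packets
 *
 * I.   Upper bound for valid data:	seq <= sender.td_maxend
 * II.  Lower bound for valid data:	seq + len >= sender.td_end -
 *					receiver.td_maxwin
 * III. Upper bound for valid (s)ack:	sack <= receiver.td_end
 * IV.  Lower bound for valid (s)ack:	sack >= receiver.td_end -
 *					MAXACKWINDOW
 *
 * where sack is the highest right edge of a sack block found in the
 * packet, or ack for packets without the SACK option.
 */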
static enum nf_ct_tcp_action
tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
unsigned int index, const struct sk_buff *skb,
unsigned int dataoff, const struct tcphdr *tcph,
const struct nf_hook_state *hook_state)
{
struct ip_ct_tcp *state = &ct->proto.tcp;
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
__u32 seq, ack, sack, end, win, swin;
bool in_recv_win, seq_ok;
s32 receiver_offset;
u16 win_raw;
seq = ntohl(tcph->seq);
ack = sack = ntohl(tcph->ack_seq);
win_raw = ntohs(tcph->window);
win = win_raw;
end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
tcp_sack(skb, dataoff, tcph, &sack);
receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
ack -= receiver_offset;
sack -= receiver_offset;
	if (sender->td_maxwin == 0) {
		/* Initialize sender data. */
		if (tcph->syn) {
			/* SYN-ACK in reply to a SYN,
			 * or SYN from the reply direction in simultaneous open.
			 */
			tcp_init_sender(sender, receiver,
					skb, dataoff, tcph,
					end, win);
			if (!tcph->ack)
				/* Simultaneous open */
				return NFCT_TCP_ACCEPT;
		} else {
			/* We are in the middle of a connection,
			 * its history is lost for us.
			 * Let's try to use the data from the packet.
			 */
			sender->td_end = end;
			swin = win << sender->td_scale;
			sender->td_maxwin = (swin == 0 ? 1 : swin);
			sender->td_maxend = end + sender->td_maxwin;
			if (receiver->td_maxwin == 0) {
				/* We haven't seen traffic in the other
				 * direction yet but we have to tweak window
				 * tracking to pass III and IV until that
				 * happens.
				 */
				receiver->td_end = receiver->td_maxend = sack;
			} else if (sack == receiver->td_end + 1) {
				/* Likely a reply to a keepalive.
				 * Needed for III.
				 */
				receiver->td_end++;
			}
		}
	} else if (tcph->syn &&
		   after(end, sender->td_end) &&
		   (state->state == TCP_CONNTRACK_SYN_SENT ||
		    state->state == TCP_CONNTRACK_SYN_RECV)) {
		/* Retransmitted syn-ack, or syn (simultaneous open).
		 *
		 * Re-init state for this direction, just like for the first
		 * syn(-ack) reply; it might differ in seq, ack or tcp options.
		 */
		tcp_init_sender(sender, receiver,
				skb, dataoff, tcph,
				end, win);
		if (dir == IP_CT_DIR_REPLY && !tcph->ack)
			return NFCT_TCP_ACCEPT;
	}
	if (!(tcph->ack)) {
		/* If there is no ACK, just pretend it was set and OK. */
		ack = sack = receiver->td_end;
	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
		    (TCP_FLAG_ACK|TCP_FLAG_RST))
		   && (ack == 0)) {
		/* Broken TCP stacks, that set ACK in RST packets as well
		 * with zero ack value.
		 */
		ack = sack = receiver->td_end;
	}
	if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
		/* RST sent answering SYN. */
		seq = end = sender->td_end;
	/* I: upper bound for valid data: seq <= sender.td_maxend */
	seq_ok = before(seq, sender->td_maxend + 1);
	if (!seq_ok) {
		u32 overshot = end - sender->td_maxend + 1;
		bool ack_ok;

		ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
		in_recv_win = receiver->td_maxwin &&
			      after(end, sender->td_end - receiver->td_maxwin - 1);

		if (in_recv_win &&
		    ack_ok &&
		    overshot <= receiver->td_maxwin &&
		    before(sack, receiver->td_end + 1)) {
			/* Overshot the tracked right edge by no more than one
			 * receive window with a sane ACK: note the new right
			 * edge but don't advance the state machine.
			 */
			sender->td_end = end;
			sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;

			return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
						  "%u bytes more than expected", overshot);
		}

		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
					  "SEQ is over upper bound %u (over the window of the receiver)",
					  sender->td_maxend + 1);
	}
	/* III: upper bound for valid (s)ack: sack <= receiver.td_end */
	if (!before(sack, receiver->td_end + 1))
		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
					  "ACK is over upper bound %u (ACKed data not seen yet)",
					  receiver->td_end + 1);

	/* II: lower bound for valid data:
	 * seq + len >= sender.td_end - receiver.td_maxwin
	 */
	in_recv_win = !receiver->td_maxwin ||
		      after(end, sender->td_end - receiver->td_maxwin - 1);
	if (!in_recv_win)
		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
					  "SEQ is under lower bound %u (already ACKed data retransmitted)",
					  sender->td_end - receiver->td_maxwin - 1);

	/* IV: lower bound for valid (s)ack:
	 * sack >= receiver.td_end - MAXACKWINDOW
	 */
	if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
					  "ignored ACK under lower bound %u (possible overly delayed)",
					  receiver->td_end - MAXACKWINDOW(sender) - 1);
	/* Take into account window scaling (RFC1323). */
	if (!tcph->syn)
		win <<= sender->td_scale;

	/* Update sender data. */
	swin = win + (sack - ack);
	if (sender->td_maxwin < swin)
		sender->td_maxwin = swin;
	if (after(end, sender->td_end)) {
		sender->td_end = end;
		sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
	}
	if (tcph->ack) {
		if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
			sender->td_maxack = ack;
			sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
		} else if (after(ack, sender->td_maxack)) {
			sender->td_maxack = ack;
		}
	}

	/* Update receiver data. */
	if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
		receiver->td_maxwin += end - sender->td_maxend;
	if (after(sack + win, receiver->td_maxend - 1)) {
		receiver->td_maxend = sack + win;
		if (win == 0)
			receiver->td_maxend++;
	}
	if (ack == receiver->td_end)
		receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;

	/* Check retransmissions. */
	if (index == TCP_ACK_SET) {
		if (state->last_dir == dir &&
		    state->last_seq == seq &&
		    state->last_ack == ack &&
		    state->last_end == end &&
		    state->last_win == win_raw) {
			state->retrans++;
		} else {
			state->last_dir = dir;
			state->last_seq = seq;
			state->last_ack = ack;
			state->last_end = end;
			state->last_win = win_raw;
			state->retrans = 0;
		}
	}
return NFCT_TCP_ACCEPT;
}
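/* An out-of-window FIN or RST has now been seen in each direction of an
 * assured connection: assume the peers agree the flow is going away and
 * lower the remaining timeout to the UNACK one rather than keeping the
 * entry around for days.
 */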
static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
enum ip_conntrack_dir dir,
int index,
const struct sk_buff *skb,
const struct nf_hook_state *hook_state)
{
const unsigned int *timeouts;
const struct nf_tcp_net *tn;
unsigned int timeout;
u32 expires;
if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
return;
switch (index) {
case TCP_RST_SET:
case TCP_FIN_SET:
break;
default:
return;
}
if (ct->proto.tcp.last_dir != dir &&
(ct->proto.tcp.last_index == TCP_FIN_SET ||
ct->proto.tcp.last_index == TCP_RST_SET)) {
expires = nf_ct_expires(ct);
if (expires < 120 * HZ)
return;
tn = nf_tcp_pernet(nf_ct_net(ct));
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = tn->timeouts;
timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
if (expires > timeout) {
nf_ct_l4proto_log_invalid(skb, ct, hook_state,
"packet (index %d, dir %d) response for index %d lower timeout to %u",
index, dir, ct->proto.tcp.last_index, timeout);
WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
}
} else {
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
}
}
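/* Table of valid flag combinations - PUSH, ECE and CWR are always valid. */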
static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
TCPHDR_URG) + 1] =
{
[TCPHDR_SYN] = 1,
[TCPHDR_SYN|TCPHDR_URG] = 1,
[TCPHDR_SYN|TCPHDR_ACK] = 1,
[TCPHDR_RST] = 1,
[TCPHDR_RST|TCPHDR_ACK] = 1,
[TCPHDR_FIN|TCPHDR_ACK] = 1,
[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
[TCPHDR_ACK] = 1,
[TCPHDR_ACK|TCPHDR_URG] = 1,
};
static void tcp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
const char *msg)
{
nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
}
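/* Protect conntrack against broken packets: header length, checksum
 * (verified on PRE_ROUTING only) and flag combination sanity checks.
 */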
static bool tcp_error(const struct tcphdr *th,
struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state)
{
unsigned int tcplen = skb->len - dataoff;
u8 tcpflags;
if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
tcp_error_log(skb, state, "truncated packet");
return true;
}
if (state->net->ct.sysctl_checksum &&
state->hook == NF_INET_PRE_ROUTING &&
nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
tcp_error_log(skb, state, "bad checksum");
return true;
}
tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
if (!tcp_valid_flags[tcpflags]) {
tcp_error_log(skb, state, "invalid tcp flag combination");
return true;
}
return false;
}
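/* Set up tracking state for the first packet of a connection. Only a SYN
 * starts a connection cleanly; anything else is a mid-stream pickup, which
 * is allowed only with tcp_loose and is tracked liberally since the
 * handshake was never seen.
 */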
static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff,
const struct tcphdr *th)
{
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
const struct nf_tcp_net *tn = nf_tcp_pernet(net);
new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
if (new_state >= TCP_CONNTRACK_MAX) {
pr_debug("nf_ct_tcp: invalid new deleting.\n");
return false;
}
if (new_state == TCP_CONNTRACK_SYN_SENT) {
memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
dataoff, th);
ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
if (ct->proto.tcp.seen[0].td_maxwin == 0)
ct->proto.tcp.seen[0].td_maxwin = 1;
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
} else if (tn->tcp_loose == 0) {
return false;
} else {
memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
ct->proto.tcp.seen[0].td_end =
segment_seq_plus_len(ntohl(th->seq), skb->len,
dataoff, th);
ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
if (ct->proto.tcp.seen[0].td_maxwin == 0)
ct->proto.tcp.seen[0].td_maxwin = 1;
ct->proto.tcp.seen[0].td_maxend =
ct->proto.tcp.seen[0].td_end +
ct->proto.tcp.seen[0].td_maxwin;
ct->proto.tcp.seen[0].flags =
ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
IP_CT_TCP_FLAG_BE_LIBERAL;
}
ct->proto.tcp.last_index = TCP_NONE_SET;
return true;
}
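/* Connections in a closing state carry little value; allow early eviction
 * when the conntrack table is under pressure.
 */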
static bool tcp_can_early_drop(const struct nf_conn *ct)
{
switch (ct->proto.tcp.state) {
case TCP_CONNTRACK_FIN_WAIT:
case TCP_CONNTRACK_LAST_ACK:
case TCP_CONNTRACK_TIME_WAIT:
case TCP_CONNTRACK_CLOSE:
case TCP_CONNTRACK_CLOSE_WAIT:
return true;
default:
break;
}
return false;
}
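/* Force the connection into CLOSE and rearm its timeout accordingly;
 * used by callers that know the flow is being torn down.
 */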
void nf_conntrack_tcp_set_closing(struct nf_conn *ct)
{
enum tcp_conntrack old_state;
const unsigned int *timeouts;
u32 timeout;
if (!nf_ct_is_confirmed(ct))
return;
spin_lock_bh(&ct->lock);
old_state = ct->proto.tcp.state;
ct->proto.tcp.state = TCP_CONNTRACK_CLOSE;
if (old_state == TCP_CONNTRACK_CLOSE ||
test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status)) {
spin_unlock_bh(&ct->lock);
return;
}
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts) {
const struct nf_tcp_net *tn;
tn = nf_tcp_pernet(nf_ct_net(ct));
timeouts = tn->timeouts;
}
timeout = timeouts[TCP_CONNTRACK_CLOSE];
WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
spin_unlock_bh(&ct->lock);
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
}
static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
{
state->td_end = 0;
state->td_maxend = 0;
state->td_maxwin = 0;
state->td_maxack = 0;
state->td_scale = 0;
state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
}
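/* Returns verdict for packet, or -1 for invalid. */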
int nf_conntrack_tcp_packet(struct nf_conn *ct,
struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
struct nf_tcp_net *tn = nf_tcp_pernet(net);
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
enum nf_ct_tcp_action res;
enum ip_conntrack_dir dir;
const struct tcphdr *th;
struct tcphdr _tcph;
unsigned long timeout;
th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
if (th == NULL)
return -NF_ACCEPT;
if (tcp_error(th, skb, dataoff, state))
return -NF_ACCEPT;
if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
return -NF_ACCEPT;
spin_lock_bh(&ct->lock);
old_state = ct->proto.tcp.state;
dir = CTINFO2DIR(ctinfo);
index = get_conntrack_index(th);
new_state = tcp_conntracks[dir][index][old_state];
	switch (new_state) {
	case TCP_CONNTRACK_SYN_SENT:
		if (old_state < TCP_CONNTRACK_TIME_WAIT)
			break;
		/* RFC 1122 allows reopening a connection directly from
		 * TIME-WAIT with a new SYN. Also handle aborted connections:
		 * we and the server think there is an existing connection,
		 * but the client aborts it and starts a new one.
		 */
		if (((ct->proto.tcp.seen[dir].flags
		      | ct->proto.tcp.seen[!dir].flags)
		     & IP_CT_TCP_FLAG_CLOSE_INIT)
		    || (ct->proto.tcp.last_dir == dir
			&& ct->proto.tcp.last_index == TCP_RST_SET)) {
			/* Kill this connection and look the packet up again;
			 * only repeat if the timer could be removed, as
			 * destruction may already be in progress elsewhere.
			 */
			spin_unlock_bh(&ct->lock);
			if (nf_ct_kill(ct))
				return -NF_REPEAT;
			return NF_DROP;
		}
		fallthrough;
	case TCP_CONNTRACK_IGNORE:
		/* Ignored packets: our connection entry may be out of sync,
		 * so ignore packets that may signal the real connection
		 * between the client and the server:
		 *
		 * a) SYN in ORIGINAL
		 * b) SYN/ACK in REPLY
		 * c) ACK in reply direction after initial SYN in original.
		 *
		 * If the ignored packet is invalid, the receiver will send
		 * a RST we'll catch below.
		 */
		if (index == TCP_SYNACK_SET
		    && ct->proto.tcp.last_index == TCP_SYN_SET
		    && ct->proto.tcp.last_dir != dir
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* b) This SYN/ACK acknowledges a SYN that we earlier
			 * ignored as invalid. This means that the client and
			 * the server are both in sync, while the firewall is
			 * not. We get in sync from the previously annotated
			 * values.
			 */
			old_state = TCP_CONNTRACK_SYN_SENT;
			new_state = TCP_CONNTRACK_SYN_RECV;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
				ct->proto.tcp.last_win == 0 ?
					1 : ct->proto.tcp.last_win;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
				ct->proto.tcp.last_wscale;
			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
				ct->proto.tcp.last_flags;
			nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
			break;
		}
		ct->proto.tcp.last_index = index;
		ct->proto.tcp.last_dir = dir;
		ct->proto.tcp.last_seq = ntohl(th->seq);
		ct->proto.tcp.last_end =
			segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
		ct->proto.tcp.last_win = ntohs(th->window);

		/* a) This is a SYN in ORIGINAL. The client and the server
		 * may be in sync but we are not. In that case, we annotate
		 * the TCP options and let the packet go through. If it is a
		 * valid SYN packet, the server will reply with a SYN/ACK, and
		 * then we'll get in sync. Otherwise, the server potentially
		 * responds with a challenge ACK if implementing RFC5961.
		 */
		if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
			struct ip_ct_tcp_state seen = {};

			ct->proto.tcp.last_flags =
			ct->proto.tcp.last_wscale = 0;
			tcp_options(skb, dataoff, th, &seen);
			if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
				ct->proto.tcp.last_wscale = seen.td_scale;
			}
			if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_SACK_PERM;
			}
			/* Mark the potential for an RFC5961 challenge ACK,
			 * to safely let the SYN be ignored above.
			 */
			if (old_state == TCP_CONNTRACK_LAST_ACK)
				ct->proto.tcp.last_flags |=
					IP_CT_EXP_CHALLENGE_ACK;
		}

		/* possible challenge ack reply to syn */
		if (old_state == TCP_CONNTRACK_SYN_SENT &&
		    index == TCP_ACK_SET &&
		    dir == IP_CT_DIR_REPLY)
			ct->proto.tcp.last_ack = ntohl(th->ack_seq);
spin_unlock_bh(&ct->lock);
nf_ct_l4proto_log_invalid(skb, ct, state,
"packet (index %d) in dir %d ignored, state %s",
index, dir,
tcp_conntrack_names[old_state]);
return NF_ACCEPT;
	case TCP_CONNTRACK_MAX:
		/* Special case for SYN proxy: when the SYN to the server or
		 * the SYN/ACK from the server is lost, the client may send
		 * a keep-alive packet while still in SYN_SENT state.
		 */
		if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
		    index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
		    ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
		    ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
			pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
			spin_unlock_bh(&ct->lock);
			return NF_ACCEPT;
		}
		/* Invalid packet */
		spin_unlock_bh(&ct->lock);
		nf_ct_l4proto_log_invalid(skb, ct, state,
					  "packet (index %d) in dir %d invalid, state %s",
					  index, dir,
					  tcp_conntrack_names[old_state]);
		return -NF_ACCEPT;
	case TCP_CONNTRACK_TIME_WAIT:
		/* RFC5961 compliance causes stacks to send a "challenge ACK",
		 * e.g. in response to spurious SYNs. Conntrack MUST NOT
		 * mistake this ACK for an ACK of the last FIN.
		 */
		if (old_state == TCP_CONNTRACK_LAST_ACK &&
		    index == TCP_ACK_SET &&
		    ct->proto.tcp.last_dir != dir &&
		    ct->proto.tcp.last_index == TCP_SYN_SET &&
		    (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
			/* Detected RFC5961 challenge ACK */
			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
			spin_unlock_bh(&ct->lock);
			nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
			return NF_ACCEPT; /* Don't change state */
		}
		break;
	case TCP_CONNTRACK_SYN_SENT2:
		/* tcp_conntracks table is not smart enough to handle
		 * simultaneous open.
		 */
		ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
		break;
case TCP_CONNTRACK_SYN_RECV:
if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
new_state = TCP_CONNTRACK_ESTABLISHED;
break;
	case TCP_CONNTRACK_CLOSE:
		if (index != TCP_RST_SET)
			break;
		/* If we are closing, the tuple might have been re-used
		 * already. last_index, last_ack, and the other fields used
		 * for validation would then be outdated, and the entry can
		 * already have been evicted by GC under pressure, so just
		 * skip the validation checks.
		 */
		if (tcp_can_early_drop(ct))
			goto in_window;
		if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
		    ct->proto.tcp.last_index != TCP_SYN_SET) {
			u32 seq = ntohl(th->seq);

			/* If we are not in established state and SEQ=0,
			 * this is most likely an answer to a SYN we let go
			 * through above (last_index can be updated due to
			 * out-of-order ACKs).
			 */
			if (seq == 0 && !nf_conntrack_tcp_established(ct))
				break;

			if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
			    !tn->tcp_ignore_invalid_rst) {
				/* Invalid RST */
				spin_unlock_bh(&ct->lock);
				nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
				return -NF_ACCEPT;
			}

			if (!nf_conntrack_tcp_established(ct) ||
			    seq == ct->proto.tcp.seen[!dir].td_maxack)
				break;

			/* Check if the RST is part of a train, such as
			 *   foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
			 *   foo:80 > bar:4379: R, 235946602:235946602(0)  ack 42
			 */
			if (ct->proto.tcp.last_index == TCP_ACK_SET &&
			    ct->proto.tcp.last_dir == dir &&
			    seq == ct->proto.tcp.last_end)
				break;

			/* The RST sequence number doesn't match exactly:
			 * keep the current state so a possible challenge ACK
			 * can still be honoured.
			 */
			new_state = old_state;
		}
		if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
		      && ct->proto.tcp.last_index == TCP_SYN_SET)
		     || (!test_bit(IPS_ASSURED_BIT, &ct->status)
			 && ct->proto.tcp.last_index == TCP_ACK_SET))
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* RST sent to an invalid SYN or ACK we had let
			 * through above.
			 */
			goto in_window;
		}
		if (old_state == TCP_CONNTRACK_SYN_SENT &&
		    ct->proto.tcp.last_index == TCP_ACK_SET &&
		    ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
		    ntohl(th->seq) == ct->proto.tcp.last_ack)
			/* RST sent after a possible challenge ACK */
			goto in_window;
		break;
default:
break;
}
res = tcp_in_window(ct, dir, index,
skb, dataoff, th, state);
switch (res) {
case NFCT_TCP_IGNORE:
spin_unlock_bh(&ct->lock);
return NF_ACCEPT;
case NFCT_TCP_INVALID:
nf_tcp_handle_invalid(ct, dir, index, skb, state);
spin_unlock_bh(&ct->lock);
return -NF_ACCEPT;
case NFCT_TCP_ACCEPT:
break;
}
in_window:
ct->proto.tcp.last_index = index;
ct->proto.tcp.last_dir = dir;
ct->proto.tcp.state = new_state;
if (old_state != new_state
&& new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = tn->timeouts;
if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if (unlikely(index == TCP_RST_SET))
timeout = timeouts[TCP_CONNTRACK_CLOSE];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
else if (ct->proto.tcp.last_win == 0 &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else
timeout = timeouts[new_state];
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
	if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
		/* If the only reply is a RST, we can consider ourselves not
		 * to have an established connection: this is a fairly common
		 * problem case, so we can delete the conntrack immediately.
		 */
		if (th->rst) {
			nf_ct_kill_acct(ct, ctinfo, skb);
			return NF_ACCEPT;
		}
		/* Do not renew the timeout on SYN retransmit: otherwise port
		 * reuse by the client or a NAT middlebox could keep the entry
		 * (including NAT info) alive indefinitely.
		 */
		if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
			return NF_ACCEPT;
		}
		/* ESTABLISHED without SEEN_REPLY, i.e. a mid-stream pickup
		 * with loose=1: avoid the large ESTABLISHED timeout.
		 */
		if (new_state == TCP_CONNTRACK_ESTABLISHED &&
		    timeout > timeouts[TCP_CONNTRACK_UNACK])
			timeout = timeouts[TCP_CONNTRACK_UNACK];
	} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
		   && (old_state == TCP_CONNTRACK_SYN_RECV
		       || old_state == TCP_CONNTRACK_ESTABLISHED)
		   && new_state == TCP_CONNTRACK_ESTABLISHED) {
		/* Set ASSURED on a valid ACK reaching ESTABLISHED after
		 * SYN_RECV, or on a valid answer for a picked-up connection.
		 */
		set_bit(IPS_ASSURED_BIT, &ct->status);
		nf_conntrack_event_cache(IPCT_ASSURED, ct);
	}
nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
return NF_ACCEPT;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
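/* Dump the TCP protoinfo nest (state, window scales, per-direction flags)
 * for ctnetlink; on destroy events only the state is included.
 */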
static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct, bool destroy)
{
struct nlattr *nest_parms;
struct nf_ct_tcp_flags tmp = {};
spin_lock_bh(&ct->lock);
nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
if (!nest_parms)
goto nla_put_failure;
if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
goto nla_put_failure;
if (destroy)
goto skip_state;
if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
ct->proto.tcp.seen[0].td_scale) ||
nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
ct->proto.tcp.seen[1].td_scale))
goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[0].flags;
if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
sizeof(struct nf_ct_tcp_flags), &tmp))
goto nla_put_failure;
tmp.flags = ct->proto.tcp.seen[1].flags;
if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
sizeof(struct nf_ct_tcp_flags), &tmp))
goto nla_put_failure;
skip_state:
spin_unlock_bh(&ct->lock);
nla_nest_end(skb, nest_parms);
return 0;
nla_put_failure:
spin_unlock_bh(&ct->lock);
return -1;
}
static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
[CTA_PROTOINFO_TCP_STATE] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_WSCALE_REPLY] = { .type = NLA_U8 },
[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL] = { .len = sizeof(struct nf_ct_tcp_flags) },
[CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) },
};
#define TCP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
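/* Apply CTA_PROTOINFO_TCP attributes received from userspace to the
 * conntrack entry, masked as requested.
 */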
static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
int err;
if (!pattr)
return 0;
err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
tcp_nla_policy, NULL);
if (err < 0)
return err;
if (tb[CTA_PROTOINFO_TCP_STATE] &&
nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
return -EINVAL;
spin_lock_bh(&ct->lock);
if (tb[CTA_PROTOINFO_TCP_STATE])
ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
struct nf_ct_tcp_flags *attr =
nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
ct->proto.tcp.seen[0].flags &= ~attr->mask;
ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
}
if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
struct nf_ct_tcp_flags *attr =
nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
ct->proto.tcp.seen[1].flags &= ~attr->mask;
ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
}
if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
ct->proto.tcp.seen[0].td_scale =
nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
ct->proto.tcp.seen[1].td_scale =
nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
}
spin_unlock_bh(&ct->lock);
return 0;
}
static unsigned int tcp_nlattr_tuple_size(void)
{
static unsigned int size __read_mostly;
if (!size)
size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
return size;
}
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
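/* Translate cttimeout netlink attributes, given in seconds, into the
 * per-state timeout array in jiffies; unset attributes keep the per-netns
 * defaults.
 */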
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
unsigned int *timeouts = data;
int i;
if (!timeouts)
timeouts = tn->timeouts;
for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
timeouts[i] = tn->timeouts[i];
if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
timeouts[TCP_CONNTRACK_SYN_SENT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
timeouts[TCP_CONNTRACK_SYN_RECV] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
timeouts[TCP_CONNTRACK_ESTABLISHED] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
timeouts[TCP_CONNTRACK_FIN_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
timeouts[TCP_CONNTRACK_LAST_ACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
timeouts[TCP_CONNTRACK_TIME_WAIT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
timeouts[TCP_CONNTRACK_CLOSE] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
timeouts[TCP_CONNTRACK_SYN_SENT2] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
timeouts[TCP_CONNTRACK_RETRANS] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_UNACK]) {
timeouts[TCP_CONNTRACK_UNACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
}
timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
return 0;
}
static int
tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
const unsigned int *timeouts = data;
if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
goto nla_put_failure;
return 0;
nla_put_failure:
return -ENOSPC;
}
static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
[CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
void nf_conntrack_tcp_init_net(struct net *net)
{
struct nf_tcp_net *tn = nf_tcp_pernet(net);
int i;
for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
tn->timeouts[i] = tcp_timeouts[i];
	/* timeouts[0] is unused; mirror SYN_SENT so it holds the 'new' timeout. */
	tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
	/* tcp_loose == 0 disables picking up already established connections. */
	tn->tcp_loose = 1;
	tn->tcp_be_liberal = 0;
	tn->tcp_ignore_invalid_rst = 0;
	/* Retransmits tolerated without an acceptable ACK before the shorter
	 * RETRANS timer kicks in.
	 */
	tn->tcp_max_retrans = 3;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
tn->offload_timeout = 30 * HZ;
#endif
}
const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
{
.l4proto = IPPROTO_TCP,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
.print_conntrack = tcp_print_conntrack,
#endif
.can_early_drop = tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
.to_nlattr = tcp_to_nlattr,
.from_nlattr = nlattr_to_tcp,
.tuple_to_nlattr = nf_ct_port_tuple_to_nlattr,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nlattr_size = TCP_NLATTR_SIZE,
.nla_policy = nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
.nlattr_max = CTA_TIMEOUT_TCP_MAX,
.obj_size = sizeof(unsigned int) *
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};