#define pr_fmt(fmt) "ch_ipsec: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
#include "chcr_ipsec.h"
#define MAX_IMM_TX_PKT_LEN 256
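/* Explicit IV length carried in the ESP payload for AES-GCM (RFC 4106) */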
#define GCM_ESP_IV_SIZE 8
static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
.xdo_dev_offload_ok = ch_ipsec_offload_ok,
.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
static struct cxgb4_uld_info ch_ipsec_uld_info = {
.name = CHIPSEC_DRV_MODULE_NAME,
.add = ch_ipsec_uld_add,
.state_change = ch_ipsec_uld_state_change,
.tx_handler = ch_ipsec_xmit,
.xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
};
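
/* Allocate the ULD context for a newly probed adapter and cache its
 * lower-level driver info.
 */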
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
{
struct ipsec_uld_ctx *u_ctx;
pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
CHIPSEC_DRV_VERSION);
u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
if (!u_ctx) {
u_ctx = ERR_PTR(-ENOMEM);
goto out;
}
u_ctx->lldi = *infop;
out:
return u_ctx;
}
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct ipsec_uld_ctx *u_ctx = handle;
pr_debug("new_state %u\n", new_state);
switch (new_state) {
case CXGB4_STATE_UP:
pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
mutex_lock(&dev_mutex);
list_add_tail(&u_ctx->entry, &uld_ctx_list);
mutex_unlock(&dev_mutex);
break;
	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	case CXGB4_STATE_DETACH:
		pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
		/* uld_ctx_list is otherwise protected by dev_mutex, so take
		 * it here as well.
		 */
		mutex_lock(&dev_mutex);
		list_del(&u_ctx->entry);
		mutex_unlock(&dev_mutex);
		break;
default:
break;
}
return 0;
}
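
/* Translate the xfrm state's AEAD ICV length into the hardware HMAC
 * control value, recording the authentication size in the SA entry.
 * Returns the control value, or -EINVAL for unsupported ICV lengths.
 */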
static int ch_ipsec_setauthsize(struct xfrm_state *x,
struct ipsec_sa_entry *sa_entry)
{
int hmac_ctrl;
int authsize = x->aead->alg_icv_len / 8;
sa_entry->authsize = authsize;
switch (authsize) {
case ICV_8:
hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
break;
case ICV_12:
hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
break;
case ICV_16:
hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
break;
default:
return -EINVAL;
}
return hmac_ctrl;
}
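
/* Load the AES-GCM key material into the SA entry: split off the 4-byte
 * nonce salt, pick the cipher key size, build the key-context header and
 * append the derived GHASH subkey H that the hardware expects after the
 * cipher key.
 */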
static int ch_ipsec_setkey(struct xfrm_state *x,
struct ipsec_sa_entry *sa_entry)
{
int keylen = (x->aead->alg_key_len + 7) / 8;
unsigned char *key = x->aead->alg_key;
int ck_size, key_ctx_size = 0;
unsigned char ghash_h[AEAD_H_SIZE];
struct crypto_aes_ctx aes;
int ret = 0;
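	/* The last four bytes of the key material are the nonce salt */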
if (keylen > 3) {
keylen -= 4;
memcpy(sa_entry->salt, key + keylen, 4);
}
if (keylen == AES_KEYSIZE_128) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
} else if (keylen == AES_KEYSIZE_192) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
} else if (keylen == AES_KEYSIZE_256) {
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
} else {
pr_err("GCM: Invalid key length %d\n", keylen);
ret = -EINVAL;
goto out;
}
memcpy(sa_entry->key, key, keylen);
sa_entry->enckey_len = keylen;
key_ctx_size = sizeof(struct _key_ctx) +
((DIV_ROUND_UP(keylen, 16)) << 4) +
AEAD_H_SIZE;
sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
CHCR_KEYCTX_MAC_KEY_SIZE_128,
0, 0,
key_ctx_size >> 4);
ret = aes_expandkey(&aes, key, keylen);
if (ret) {
sa_entry->enckey_len = 0;
goto out;
}
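	/* Derive the GHASH subkey H by encrypting the all-zero block and
	 * store it right after the 16-byte-aligned cipher key.
	 */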
memset(ghash_h, 0, AEAD_H_SIZE);
aes_encrypt(&aes, ghash_h, ghash_h);
memzero_explicit(&aes, sizeof(aes));
memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
16), ghash_h, AEAD_H_SIZE);
sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
AEAD_H_SIZE;
out:
return ret;
}
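
/* Check that an xfrm state is one the hardware can offload (ESP-only,
 * AES-GCM with seqiv, no encapsulation or TFC padding) and, if so,
 * allocate and initialise its SA entry.
 */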
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
int res = 0;
if (x->props.aalgo != SADB_AALG_NONE) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm state offloaded");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm offload");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
return -EINVAL;
}
if (x->encap) {
NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state not offloaded");
return -EINVAL;
}
if (!x->aead) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128 &&
x->aead->alg_icv_len != 96) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 96b & 128b");
return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
NL_SET_ERR_MSG_MOD(extack, "cannot offload xfrm states with AEAD key length other than 128/256 bit");
return -EINVAL;
}
if (x->tfcpad) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
return -EINVAL;
}
if (!x->geniv) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
return -EINVAL;
}
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload");
return -EINVAL;
}
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
goto out;
}
	/* Fail the add instead of installing a half-initialised SA if key
	 * or ICV setup does not succeed.
	 */
	res = ch_ipsec_setauthsize(x, sa_entry);
	if (res < 0)
		goto out_free;
	sa_entry->hmac_ctrl = res;
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	res = ch_ipsec_setkey(x, sa_entry);
	if (res)
		goto out_free;
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
	goto out;
out_free:
	kfree(sa_entry);
out:
return res;
}
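
/* Nothing to tear down in hardware here; the SA entry itself is freed
 * in ch_ipsec_xfrm_free_state().
 */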
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
if (!x->xso.offload_handle)
return;
}
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
if (!x->xso.offload_handle)
return;
sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
kfree(sa_entry);
module_put(THIS_MODULE);
}
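
/* Offload only packets whose headers the hardware can parse: no IPv4
 * options and no IPv6 extension headers.
 */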
static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
if (x->props.family == AF_INET) {
if (ip_hdr(skb)->ihl > 5)
return false;
} else {
if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
return false;
}
return true;
}
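
/* The full 64-bit ESN is rebuilt per packet in copy_esn_pktxt(), so
 * there is no device state to advance.
 */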
static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
if (!x->xso.offload_handle)
return;
}
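
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *	@sa_entry: the SA entry for this packet
 *
 *	Returns the total header length if the packet is small enough to be
 *	inlined into a work request, else 0.
 */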
static int is_eth_imm(const struct sk_buff *skb,
struct ipsec_sa_entry *sa_entry)
{
unsigned int kctx_len;
int hdrlen;
kctx_len = sa_entry->kctx_len;
hdrlen = sizeof(struct fw_ulptx_wr) +
sizeof(struct chcr_ipsec_req) + kctx_len;
hdrlen += sizeof(struct cpl_tx_pkt);
if (sa_entry->esn)
hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
<< 4);
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
}
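
/**
 *	calc_tx_sec_flits - calculate the number of flits for a packet Tx WR
 *	@skb: the packet
 *	@sa_entry: the SA entry for this packet
 *	@immediate: set to true if the packet will be inlined
 *
 *	Returns the number of flits needed for a Tx work request for the
 *	given Ethernet packet, including the needed WR and CPL headers.
 */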
static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
struct ipsec_sa_entry *sa_entry,
bool *immediate)
{
unsigned int kctx_len;
unsigned int flits;
int aadivlen;
int hdrlen;
kctx_len = sa_entry->kctx_len;
hdrlen = is_eth_imm(skb, sa_entry);
aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
16) : 0;
aadivlen <<= 4;
if (hdrlen) {
*immediate = true;
return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
}
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
flits += (sizeof(struct fw_ulptx_wr) +
sizeof(struct chcr_ipsec_req) +
kctx_len +
sizeof(struct cpl_tx_pkt_core) +
aadivlen) / sizeof(__be64);
return flits;
}
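
/* Write the AAD/IV block used for ESN states: the SPI, the 64-bit
 * sequence number rebuilt from the ESP header and the offload state,
 * and the explicit IV from the packet.
 */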
static void *copy_esn_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos,
struct ipsec_sa_entry *sa_entry)
{
struct chcr_ipsec_aadiv *aadiv;
struct ulptx_idata *sc_imm;
struct ip_esp_hdr *esphdr;
struct xfrm_offload *xo;
struct sge_eth_txq *q;
struct adapter *adap;
struct port_info *pi;
__be64 seqno;
u32 qidx;
u32 seqlo;
u8 *iv;
int eoq;
int len;
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + pi->first_qset];
eoq = (void *)q->q.stat - pos;
if (!eoq)
pos = q->q.desc;
len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
memset(pos, 0, len);
aadiv = (struct chcr_ipsec_aadiv *)pos;
esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
xo = xfrm_offload(skb);
	aadiv->spi = esphdr->spi;
	/* Rebuild the 64-bit ESN from the low 32 bits in the ESP header
	 * and the high 32 bits tracked by the xfrm offload state.
	 */
	seqlo = ntohl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	memcpy(aadiv->iv, iv, 8);
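	/* For a packet inlined into the WR, follow the AAD/IV block with an
	 * immediate-data sub-command covering the whole packet.
	 */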
if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
sc_imm = (struct ulptx_idata *)(pos +
(DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
sizeof(__be64)) << 3));
sc_imm->cmd_more = FILL_CMD_MORE(0);
sc_imm->len = cpu_to_be32(skb->len);
}
pos += len;
return pos;
}
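
/* Fill the CPL_TX_PKT_CORE command (checksum offload disabled, with
 * optional VLAN insertion) and, for ESN states, append the AAD/IV block.
 */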
static void *copy_cpltx_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos,
struct ipsec_sa_entry *sa_entry)
{
struct cpl_tx_pkt_core *cpl;
struct sge_eth_txq *q;
struct adapter *adap;
struct port_info *pi;
u32 ctrl0, qidx;
u64 cntrl = 0;
int left;
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + pi->first_qset];
left = (void *)q->q.stat - pos;
if (!left)
pos = q->q.desc;
cpl = (struct cpl_tx_pkt_core *)pos;
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
TXPKT_PF_V(adap->pf);
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
}
cpl->ctrl0 = htonl(ctrl0);
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
pos += sizeof(struct cpl_tx_pkt_core);
if (sa_entry->esn)
pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
return pos;
}
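
/* Copy the key context (header, salt and key material) into the queue,
 * wrapping to the start of the descriptor ring if it would cross the
 * end, then continue with the CPL_TX_PKT_CORE command.
 */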
static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos,
struct ipsec_sa_entry *sa_entry)
{
struct _key_ctx *key_ctx;
int left, eoq, key_len;
struct sge_eth_txq *q;
struct adapter *adap;
struct port_info *pi;
unsigned int qidx;
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + pi->first_qset];
key_len = sa_entry->kctx_len;
eoq = (void *)q->q.stat - pos;
left = eoq;
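	/* If positioned at the status page, wrap to the start of the ring;
	 * each descriptor holds 64 bytes.
	 */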
if (!eoq) {
pos = q->q.desc;
left = 64 * q->q.size;
}
key_ctx = (struct _key_ctx *)pos;
key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
pos += sizeof(struct _key_ctx);
left -= sizeof(struct _key_ctx);
if (likely(key_len <= left)) {
memcpy(key_ctx->key, sa_entry->key, key_len);
pos += key_len;
} else {
memcpy(pos, sa_entry->key, left);
memcpy(q->q.desc, sa_entry->key + left,
key_len - left);
pos = (u8 *)q->q.desc + (key_len - left);
}
pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
return pos;
}
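
/* Build the crypto work request for an outbound ESP packet: ULPTX WR
 * header, CPL_TX_SEC_PDU describing the IV/AAD/cipher layout, then the
 * key context and CPL_TX_PKT_CORE via copy_key_cpltx_pktxt().
 */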
static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
struct net_device *dev,
void *pos,
int credits,
struct ipsec_sa_entry *sa_entry)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
unsigned int ivsize = GCM_ESP_IV_SIZE;
struct chcr_ipsec_wr *wr;
bool immediate = false;
u16 immdatalen = 0;
unsigned int flits;
u32 ivinoffset;
u32 aadstart;
u32 aadstop;
u32 ciphstart;
u16 sc_more = 0;
u32 ivdrop = 0;
u32 esnlen = 0;
u32 wr_mid;
u16 ndesc;
int qidx = skb_get_queue_mapping(skb);
struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
unsigned int kctx_len = sa_entry->kctx_len;
int qid = q->q.cntxt_id;
atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);
flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
ndesc = DIV_ROUND_UP(flits, 2);
if (sa_entry->esn)
ivdrop = 1;
if (immediate)
immdatalen = skb->len;
if (sa_entry->esn) {
esnlen = sizeof(struct chcr_ipsec_aadiv);
if (!skb_is_nonlinear(skb))
sc_more = 1;
}
wr = (struct chcr_ipsec_wr *)pos;
wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
netif_tx_stop_queue(q->txq);
q->q.stops++;
if (!q->dbqt)
wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr_mid |= FW_ULPTX_WR_DATA_F;
wr->wreq.flowid_len16 = htonl(wr_mid);
wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
wr->req.ulptx.len = htonl(ndesc - 1);
wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
sizeof(wr->req.key_ctx) +
kctx_len +
sizeof(struct cpl_tx_pkt_core) +
esnlen +
(esnlen ? 0 : immdatalen));
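	/* 1-based byte offsets, within the payload handed to the crypto
	 * engine, of the IV insertion point and of the AAD and cipher
	 * regions.
	 */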
ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
(skb_transport_offset(skb) +
sizeof(struct ip_esp_hdr) + 1);
wr->req.sec_cpl.op_ivinsrtofst = htonl(
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(2) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(
ivinoffset));
wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
(skb_transport_offset(skb) +
sizeof(struct ip_esp_hdr));
ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
GCM_ESP_IV_SIZE + 1;
ciphstart += sa_entry->esn ? esnlen : 0;
wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
aadstart,
aadstop,
ciphstart, 0);
wr->req.sec_cpl.cipherstop_lo_authinsert =
FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
sa_entry->authsize,
sa_entry->authsize);
wr->req.sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
CHCR_SCMD_AUTH_MODE_GHASH,
sa_entry->hmac_ctrl,
ivsize >> 1);
wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
0, ivdrop, 0);
pos += sizeof(struct fw_ulptx_wr) +
sizeof(struct ulp_txpkt) +
sizeof(struct ulptx_idata) +
sizeof(struct cpl_tx_sec_pdu);
pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);
return pos;
}
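
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */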
static unsigned int flits_to_desc(unsigned int n)
{
WARN_ON(n > SGE_MAX_WR_LEN / 8);
return DIV_ROUND_UP(n, 8);
}
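
/**
 *	txq_avail - return the number of available slots in a Tx queue
 *	@q: the Tx queue
 *
 *	Returns the number of descriptors in a Tx queue available to write
 *	new packets.
 */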
static unsigned int txq_avail(const struct sge_txq *q)
{
return q->size - 1 - q->in_use;
}
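
/**
 *	eth_txq_stop - stop an Ethernet Tx queue
 *	@q: the queue to stop
 *
 *	Stops an Ethernet Tx queue when its descriptor ring fills up.
 */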
static void eth_txq_stop(struct sge_eth_txq *q)
{
netif_tx_stop_queue(q->txq);
q->q.stops++;
}
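
/**
 *	txq_advance - advance the Tx queue
 *	@q: the Tx queue
 *	@n: number of new descriptors
 *
 *	Advances the producer index by @n descriptors, wrapping at the ring
 *	size.
 */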
static void txq_advance(struct sge_txq *q, unsigned int n)
{
q->in_use += n;
q->pidx += n;
if (q->pidx >= q->size)
q->pidx -= q->size;
}
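
/* Transmit an IPsec-offloaded packet: reserve ring space, build the
 * crypto work request, then either inline the packet or attach its DMA
 * scatter/gather list, and ring the doorbell.
 */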
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_state *x = xfrm_input_state(skb);
unsigned int last_desc, ndesc, flits = 0;
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
struct tx_sw_desc *sgl_sdesc;
int qidx, left, credits;
bool immediate = false;
struct sge_eth_txq *q;
struct adapter *adap;
struct port_info *pi;
struct sec_path *sp;
if (!x->xso.offload_handle)
return NETDEV_TX_BUSY;
sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
pi = netdev_priv(dev);
adap = pi->adapter;
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + pi->first_qset];
cxgb4_reclaim_completed_tx(adap, &q->q, true);
flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
if (unlikely(credits < 0)) {
eth_txq_stop(q);
dev_err(adap->pdev_dev,
"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
dev->name, qidx, credits, ndesc, txq_avail(&q->q),
flits);
return NETDEV_TX_BUSY;
}
last_desc = q->q.pidx + ndesc - 1;
if (last_desc >= q->q.size)
last_desc -= q->q.size;
sgl_sdesc = &q->q.sdesc[last_desc];
if (!immediate &&
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
goto out_free;
}
pos = (u64 *)&q->q.desc[q->q.pidx];
before = (u64 *)pos;
end = (u64 *)pos + flits;
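	/* Build the IPsec work request; if it wrapped past the end of the
	 * descriptor ring, recompute the SGL end pointer relative to the
	 * ring start.
	 */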
pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
credits, sa_entry);
if (before > (u64 *)pos) {
left = (u8 *)end - (u8 *)q->q.stat;
end = (void *)q->q.desc + left;
}
if (pos == (u64 *)q->q.stat) {
left = (u8 *)end - (u8 *)q->q.stat;
end = (void *)q->q.desc + left;
pos = (void *)q->q.desc;
}
sgl = (void *)pos;
if (immediate) {
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
0, sgl_sdesc->addr);
skb_orphan(skb);
sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
return NETDEV_TX_OK;
}
static int __init ch_ipsec_init(void)
{
cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);
return 0;
}
static void __exit ch_ipsec_exit(void)
{
struct ipsec_uld_ctx *u_ctx, *tmp;
struct adapter *adap;
mutex_lock(&dev_mutex);
list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
adap = pci_get_drvdata(u_ctx->lldi.pdev);
atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
list_del(&u_ctx->entry);
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
}
module_init(ch_ipsec_init);
module_exit(ch_ipsec_exit);
MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHIPSEC_DRV_VERSION);