#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
#include "sja1105_vl.h"
#define SJA1105_SIZE_VL_STATUS 8
/* Insert a gate event into @gating_cfg->entries, keeping the list sorted by
 * event time. Two events at the same time cannot be offloaded, so an equal
 * time is rejected as a conflict.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY on a
 * conflicting (equal) event time.
 */
static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
				     struct sja1105_rule *rule,
				     u8 gate_state, s64 entry_time,
				     struct netlink_ext_ack *extack)
{
	struct sja1105_gate_entry *e;
	int rc;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->rule = rule;
	e->gate_state = gate_state;
	/* ->interval temporarily holds the absolute event time; it is
	 * rewritten into a true interval (delta to the next event) later by
	 * sja1105_gating_cfg_time_to_interval().
	 */
	e->interval = entry_time;

	if (list_empty(&gating_cfg->entries)) {
		list_add(&e->list, &gating_cfg->entries);
	} else {
		struct sja1105_gate_entry *p;

		list_for_each_entry(p, &gating_cfg->entries, list) {
			if (p->interval == e->interval) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Gate conflict");
				rc = -EBUSY;
				goto err;
			}

			if (e->interval < p->interval)
				break;
		}
		/* Insert before @p; if the loop ran to completion, @p aliases
		 * the list head and this appends at the tail.
		 */
		list_add(&e->list, p->list.prev);
	}

	gating_cfg->num_entries++;

	return 0;
err:
	kfree(e);
	return rc;
}
/* The entries in @gating_cfg are sorted by absolute event time (placed in
 * ->interval by sja1105_insert_gate_entry()). Rewrite each ->interval in
 * place as the delta to the next event; the last entry's interval extends to
 * the end of @cycle_time.
 *
 * NOTE(review): assumes the entry list is non-empty (list_last_entry on an
 * empty list would be invalid) — callers must guarantee at least one event
 * was inserted.
 */
static void
sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
				    u64 cycle_time)
{
	struct sja1105_gate_entry *last_e;
	struct sja1105_gate_entry *e;
	struct list_head *prev;

	list_for_each_entry(e, &gating_cfg->entries, list) {
		struct sja1105_gate_entry *p;

		prev = e->list.prev;

		/* The first entry has no predecessor to update */
		if (prev == &gating_cfg->entries)
			continue;

		p = list_entry(prev, struct sja1105_gate_entry, list);
		/* Predecessor's interval = time until this event */
		p->interval = e->interval - p->interval;
	}

	last_e = list_last_entry(&gating_cfg->entries,
				 struct sja1105_gate_entry, list);
	last_e->interval = cycle_time - last_e->interval;
}
static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
{
struct sja1105_gate_entry *e, *n;
list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
list_del(&e->list);
kfree(e);
}
}
/* Flatten the gate schedules of all time-triggered virtual links into a
 * single gating subschedule whose cycle is the largest cycle time among the
 * rules; rules with shorter cycles have their events repeated until the
 * common cycle is filled.
 *
 * Returns 0 (also when there are no time-triggered rules), or a negative
 * error from sja1105_insert_gate_entry(), with the partial config freed.
 */
static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
					      struct netlink_ext_ack *extack)
{
	struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
	struct sja1105_rule *rule;
	s64 max_cycle_time = 0;
	s64 its_base_time = 0;
	int i, rc = 0;

	/* Rebuild from scratch: drop any previously composed subschedule */
	sja1105_free_gating_config(gating_cfg);

	/* The common cycle is that of the rule with the largest cycle time,
	 * and that rule's base time becomes the subschedule's base time.
	 */
	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type != SJA1105_RULE_VL)
			continue;
		if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
			continue;

		if (max_cycle_time < rule->vl.cycle_time) {
			max_cycle_time = rule->vl.cycle_time;
			its_base_time = rule->vl.base_time;
		}
	}

	/* No time-triggered virtual links: nothing to compose */
	if (!max_cycle_time)
		return 0;

	dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
		max_cycle_time, its_base_time);

	gating_cfg->base_time = its_base_time;
	gating_cfg->cycle_time = max_cycle_time;
	gating_cfg->num_entries = 0;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		s64 time;
		s64 rbt;

		if (rule->type != SJA1105_RULE_VL)
			continue;
		if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
			continue;

		/* Express this rule's base time as an offset within the
		 * subschedule's cycle (first cycle start at or after
		 * its_base_time).
		 */
		rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
				       its_base_time);
		rbt -= its_base_time;

		time = rbt;

		for (i = 0; i < rule->vl.num_entries; i++) {
			u8 gate_state = rule->vl.entries[i].gate_state;
			s64 entry_time = time;

			/* Repeat this gate event once per rule cycle until
			 * the common (maximum) cycle is covered.
			 */
			while (entry_time < max_cycle_time) {
				rc = sja1105_insert_gate_entry(gating_cfg, rule,
							       gate_state,
							       entry_time,
							       extack);
				if (rc)
					goto err;

				entry_time += rule->vl.cycle_time;
			}
			time += rule->vl.entries[i].interval;
		}
	}

	/* Convert absolute event times into back-to-back intervals */
	sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);

	return 0;
err:
	sja1105_free_gating_config(gating_cfg);

	return rc;
}
/* Strict-weak ordering for VL Lookup entries: compare by macaddr, then
 * vlanid, then port, then vlanprior. Returns true iff @a sorts strictly
 * before @b; equal keys return false.
 */
static bool sja1105_vl_key_lower(struct sja1105_vl_lookup_entry *a,
				 struct sja1105_vl_lookup_entry *b)
{
	if (a->macaddr != b->macaddr)
		return a->macaddr < b->macaddr;

	if (a->vlanid != b->vlanid)
		return a->vlanid < b->vlanid;

	if (a->port != b->port)
		return a->port < b->port;

	if (a->vlanprior != b->vlanprior)
		return a->vlanprior < b->vlanprior;

	/* Keys are identical */
	return false;
}
/* Return the tag_8021q VID used for @dp: the per-bridge VID when the port
 * is under a bridge, otherwise the port's standalone VID.
 */
static u16 sja1105_port_get_tag_8021q_vid(struct dsa_port *dp)
{
	if (dp->bridge)
		return dsa_tag_8021q_bridge_vid(dsa_port_bridge_num_get(dp));

	return dsa_tag_8021q_standalone_vid(dp);
}
/* Rebuild the VL Lookup, VL Policing, VL Forwarding and VL Forwarding
 * Parameters static config tables from the current list of VL rules.
 *
 * Returns 0 on success, -ENOSPC when the rules exceed the table limits,
 * -ENOMEM on allocation failure.
 */
static int sja1105_init_virtual_links(struct sja1105_private *priv,
				      struct netlink_ext_ack *extack)
{
	struct sja1105_vl_policing_entry *vl_policing;
	struct sja1105_vl_forwarding_entry *vl_fwd;
	struct sja1105_vl_lookup_entry *vl_lookup;
	bool have_critical_virtual_links = false;
	struct sja1105_table *table;
	struct sja1105_rule *rule;
	int num_virtual_links = 0;
	int max_sharindx = 0;
	int i, j, k;

	/* Dimension the problem: one VL Lookup entry is needed per
	 * {rule, ingress port} pair, and the policing/forwarding tables must
	 * reach the largest shared index (sharindx) in use.
	 */
	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		if (rule->type != SJA1105_RULE_VL)
			continue;
		num_virtual_links += hweight_long(rule->port_mask);

		if (rule->vl.type != SJA1105_VL_NONCRITICAL)
			have_critical_virtual_links = true;
		if (max_sharindx < rule->vl.sharindx)
			max_sharindx = rule->vl.sharindx;
	}

	if (num_virtual_links > SJA1105_MAX_VL_LOOKUP_COUNT) {
		NL_SET_ERR_MSG_MOD(extack, "Not enough VL entries available");
		return -ENOSPC;
	}

	if (max_sharindx + 1 > SJA1105_MAX_VL_LOOKUP_COUNT) {
		NL_SET_ERR_MSG_MOD(extack, "Policer index out of range");
		return -ENOSPC;
	}

	/* Policing/forwarding tables are sized to cover both the per-link
	 * slots and the shared-policer slots.
	 */
	max_sharindx = max_t(int, num_virtual_links, max_sharindx) + 1;

	/* Discard the previous VL tables before rebuilding them */
	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* No virtual links: leave all VL tables empty */
	if (!num_virtual_links)
		return 0;

	table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
	table->entries = kcalloc(num_virtual_links,
				 table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = num_virtual_links;
	vl_lookup = table->entries;

	/* Populate one VL Lookup entry per {rule, ingress port} */
	k = 0;

	list_for_each_entry(rule, &priv->flow_block.rules, list) {
		unsigned long port;

		if (rule->type != SJA1105_RULE_VL)
			continue;

		for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
			vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
			vl_lookup[k].port = port;
			vl_lookup[k].macaddr = rule->key.vl.dmac;
			if (rule->key.type == SJA1105_KEY_VLAN_AWARE_VL) {
				vl_lookup[k].vlanid = rule->key.vl.vid;
				vl_lookup[k].vlanprior = rule->key.vl.pcp;
			} else {
				/* VLAN-unaware key: match on the tag_8021q
				 * VID carried by frames from this port
				 */
				struct dsa_port *dp = dsa_to_port(priv->ds, port);
				u16 vid = sja1105_port_get_tag_8021q_vid(dp);

				vl_lookup[k].vlanid = vid;
				vl_lookup[k].vlanprior = 0;
			}
			/* Non-critical links forward directly; critical ones
			 * go through the policing/forwarding tables below.
			 */
			if (rule->vl.type == SJA1105_VL_NONCRITICAL)
				vl_lookup[k].destports = rule->vl.destports;
			else
				vl_lookup[k].iscritical = true;
			/* Remember which rule produced this entry so it can
			 * be found again after sorting.
			 */
			vl_lookup[k].flow_cookie = rule->cookie;
			k++;
		}
	}

	/* Sort the lookup entries in ascending key order (simple O(n^2)
	 * exchange sort; table sizes are small).
	 * NOTE(review): presumably the hardware requires sorted entries —
	 * confirm against the switch documentation.
	 */
	for (i = 0; i < num_virtual_links; i++) {
		struct sja1105_vl_lookup_entry *a = &vl_lookup[i];

		for (j = i + 1; j < num_virtual_links; j++) {
			struct sja1105_vl_lookup_entry *b = &vl_lookup[j];

			if (sja1105_vl_key_lower(b, a)) {
				struct sja1105_vl_lookup_entry tmp = *a;

				*a = *b;
				*b = tmp;
			}
		}
	}

	/* Only critical (time-triggered) links need policing/forwarding */
	if (!have_critical_virtual_links)
		return 0;

	table = &priv->static_config.tables[BLK_IDX_VL_POLICING];
	table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = max_sharindx;
	vl_policing = table->entries;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING];
	table->entries = kcalloc(max_sharindx, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = max_sharindx;
	vl_fwd = table->entries;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;
	table->entry_count = 1;

	/* Walk the (sorted) lookup table and fill in, for each critical
	 * entry, both its own slot (index i) and its shared slot (sharindx):
	 * priority, partition and destination ports live on the shared slot.
	 */
	for (i = 0; i < num_virtual_links; i++) {
		unsigned long cookie = vl_lookup[i].flow_cookie;
		struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);

		if (rule->vl.type == SJA1105_VL_NONCRITICAL)
			continue;
		if (rule->vl.type == SJA1105_VL_TIME_TRIGGERED) {
			int sharindx = rule->vl.sharindx;

			vl_policing[i].type = 1;
			vl_policing[i].sharindx = sharindx;
			vl_policing[i].maxlen = rule->vl.maxlen;
			vl_policing[sharindx].type = 1;

			vl_fwd[i].type = 1;
			vl_fwd[sharindx].type = 1;
			vl_fwd[sharindx].priority = rule->vl.ipv;
			vl_fwd[sharindx].partition = 0;
			vl_fwd[sharindx].destports = rule->vl.destports;
		}
	}

	sja1105_frame_memory_partitioning(priv);

	return 0;
}
/* Offload a redirect action as a non-critical virtual link: frames matching
 * @key on ingress @port are forwarded to @destports. @append selects whether
 * @destports is OR-ed into, or replaces, the rule's current destination
 * mask. The rule (identified by @cookie) is created on first use and shared
 * by all ingress ports in its port_mask.
 *
 * Returns 0 on success or a negative error code.
 */
int sja1105_vl_redirect(struct sja1105_private *priv, int port,
			struct netlink_ext_ack *extack, unsigned long cookie,
			struct sja1105_key *key, unsigned long destports,
			bool append)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct dsa_port *dp = dsa_to_port(priv->ds, port);
	bool vlan_aware = dsa_port_is_vlan_filtering(dp);
	int rc;

	/* The key type must match the port's VLAN awareness: DMAC-only when
	 * VLAN-unaware, {DMAC, VID, PCP} when VLAN-aware.
	 */
	if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only redirect based on DMAC");
		return -EOPNOTSUPP;
	} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only redirect based on {DMAC, VID, PCP}");
		return -EOPNOTSUPP;
	}

	if (!rule) {
		/* First port to use this cookie: create the rule */
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		rule->cookie = cookie;
		rule->type = SJA1105_RULE_VL;
		rule->key = *key;
		list_add(&rule->list, &priv->flow_block.rules);
	}

	rule->port_mask |= BIT(port);
	if (append)
		rule->vl.destports |= destports;
	else
		rule->vl.destports = destports;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc) {
		/* Roll back this port's contribution; free the rule when no
		 * port references it anymore.
		 */
		rule->port_mask &= ~BIT(port);
		if (!rule->port_mask) {
			list_del(&rule->list);
			kfree(rule);
		}
	}

	return rc;
}
/* Remove @port from @rule's port mask, freeing the rule once no port
 * references it anymore, then recompute the gating subschedule, the VL
 * tables and the schedule, and commit the new static config to hardware.
 *
 * Returns 0 on success or a negative error code.
 */
int sja1105_vl_delete(struct sja1105_private *priv, int port,
		      struct sja1105_rule *rule, struct netlink_ext_ack *extack)
{
	int rc;

	rule->port_mask &= ~BIT(port);

	if (!rule->port_mask) {
		list_del(&rule->list);
		/* Bug fix: the gate entry array allocated by
		 * sja1105_vl_gate() was leaked here. kfree(NULL) is a no-op,
		 * so this is safe for non-gated (redirect) rules too, since
		 * the rule is kzalloc'd.
		 */
		kfree(rule->vl.entries);
		kfree(rule);
	}

	rc = sja1105_compose_gating_subschedule(priv, extack);
	if (rc)
		return rc;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc)
		return rc;

	rc = sja1105_init_scheduling(priv);
	if (rc < 0)
		return rc;

	return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}
/* Offload a tc-gate action as a time-triggered virtual link: frames
 * matching @key on ingress @port are released according to the cyclic
 * schedule in @entries. The schedule is validated and copied onto the rule
 * at creation time; afterwards the global gating subschedule and the VL
 * tables are recomputed and checked for conflicts with tc-taprio.
 * @prio is currently unused. @index becomes the shared policer index.
 *
 * Returns 0 on success or a negative error code.
 */
int sja1105_vl_gate(struct sja1105_private *priv, int port,
		    struct netlink_ext_ack *extack, unsigned long cookie,
		    struct sja1105_key *key, u32 index, s32 prio,
		    u64 base_time, u64 cycle_time, u64 cycle_time_ext,
		    u32 num_entries, struct action_gate_entry *entries)
{
	struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
	struct dsa_port *dp = dsa_to_port(priv->ds, port);
	bool vlan_aware = dsa_port_is_vlan_filtering(dp);
	int ipv = -1;
	int i, rc;
	s32 rem;

	if (cycle_time_ext) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cycle time extension not supported");
		return -EOPNOTSUPP;
	}

	/* All times must be multiples of the hardware's time granularity,
	 * one delta unit = sja1105_delta_to_ns(1) = 200 ns.
	 */
	div_s64_rem(base_time, sja1105_delta_to_ns(1), &rem);
	if (rem) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Base time must be multiple of 200 ns");
		return -ERANGE;
	}

	div_s64_rem(cycle_time, sja1105_delta_to_ns(1), &rem);
	if (rem) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cycle time must be multiple of 200 ns");
		return -ERANGE;
	}

	/* The key type must match the port's VLAN awareness */
	if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only gate based on DMAC");
		return -EOPNOTSUPP;
	} else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can only gate based on {DMAC, VID, PCP}");
		return -EOPNOTSUPP;
	}

	if (!rule) {
		/* First time this cookie is seen: create the rule and copy
		 * the gate control list, validating each entry.
		 */
		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
		if (!rule)
			return -ENOMEM;

		list_add(&rule->list, &priv->flow_block.rules);
		rule->cookie = cookie;
		rule->type = SJA1105_RULE_VL;
		rule->key = *key;
		rule->vl.type = SJA1105_VL_TIME_TRIGGERED;
		rule->vl.sharindx = index;
		rule->vl.base_time = base_time;
		rule->vl.cycle_time = cycle_time;
		rule->vl.num_entries = num_entries;
		rule->vl.entries = kcalloc(num_entries,
					   sizeof(struct action_gate_entry),
					   GFP_KERNEL);
		if (!rule->vl.entries) {
			rc = -ENOMEM;
			goto out;
		}

		for (i = 0; i < num_entries; i++) {
			/* Each interval must also be a multiple of 200 ns */
			div_s64_rem(entries[i].interval,
				    sja1105_delta_to_ns(1), &rem);
			if (rem) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Interval must be multiple of 200 ns");
				rc = -ERANGE;
				goto out;
			}

			if (!entries[i].interval) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Interval cannot be zero");
				rc = -ERANGE;
				goto out;
			}

			/* The interval field is bounded by the hardware's
			 * maximum delta value.
			 */
			if (ns_to_sja1105_delta(entries[i].interval) >
			    SJA1105_TAS_MAX_DELTA) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Maximum interval is 52 ms");
				rc = -ERANGE;
				goto out;
			}

			if (entries[i].maxoctets != -1) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Cannot offload IntervalOctetMax");
				rc = -EOPNOTSUPP;
				goto out;
			}

			/* All entries of a VL must agree on their internal
			 * priority value (IPV).
			 */
			if (ipv == -1) {
				ipv = entries[i].ipv;
			} else if (ipv != entries[i].ipv) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Only support a single IPV per VL");
				rc = -EOPNOTSUPP;
				goto out;
			}

			rule->vl.entries[i] = entries[i];
		}

		/* No entry specified an IPV: default to the key's PCP for
		 * VLAN-aware keys, otherwise zero.
		 */
		if (ipv == -1) {
			if (key->type == SJA1105_KEY_VLAN_AWARE_VL)
				ipv = key->vl.pcp;
			else
				ipv = 0;
		}

		/* Maximum frame length accepted on this VL */
		rule->vl.maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
		rule->vl.ipv = ipv;
	}

	rule->port_mask |= BIT(port);

	rc = sja1105_compose_gating_subschedule(priv, extack);
	if (rc)
		goto out;

	rc = sja1105_init_virtual_links(priv, extack);
	if (rc)
		goto out;

	if (sja1105_gating_check_conflicts(priv, -1, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Conflict with tc-taprio schedule");
		rc = -ERANGE;
		goto out;
	}

out:
	if (rc) {
		/* Roll back this port's contribution; drop the rule entirely
		 * (including its gate entry array) when no other port uses
		 * it.
		 */
		rule->port_mask &= ~BIT(port);
		if (!rule->port_mask) {
			list_del(&rule->list);
			kfree(rule->vl.entries);
			kfree(rule);
		}
	}

	return rc;
}
static int sja1105_find_vlid(struct sja1105_private *priv, int port,
struct sja1105_key *key)
{
struct sja1105_vl_lookup_entry *vl_lookup;
struct sja1105_table *table;
int i;
if (WARN_ON(key->type != SJA1105_KEY_VLAN_AWARE_VL &&
key->type != SJA1105_KEY_VLAN_UNAWARE_VL))
return -1;
table = &priv->static_config.tables[BLK_IDX_VL_LOOKUP];
vl_lookup = table->entries;
for (i = 0; i < table->entry_count; i++) {
if (key->type == SJA1105_KEY_VLAN_AWARE_VL) {
if (vl_lookup[i].port == port &&
vl_lookup[i].macaddr == key->vl.dmac &&
vl_lookup[i].vlanid == key->vl.vid &&
vl_lookup[i].vlanprior == key->vl.pcp)
return i;
} else {
if (vl_lookup[i].port == port &&
vl_lookup[i].macaddr == key->vl.dmac)
return i;
}
}
return -1;
}
/* Report drop statistics for a time-triggered VL rule on @port, read from
 * the switch's VL status area over SPI. Drops are the sum of the timingerr,
 * unreleased and lengtherr counters; the delta against the previously
 * reported total is pushed into @stats.
 *
 * Returns 0 (also for non-gated rules, or when the rule has no VL Lookup
 * entry), or a negative error on SPI failure.
 */
int sja1105_vl_stats(struct sja1105_private *priv, int port,
		     struct sja1105_rule *rule, struct flow_stats *stats,
		     struct netlink_ext_ack *extack)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 buf[SJA1105_SIZE_VL_STATUS] = {0};
	u64 unreleased;
	u64 timingerr;
	u64 lengtherr;
	int vlid, rc;
	u64 pkts;

	/* Only time-triggered (gated) links have VL status counters */
	if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
		return 0;

	/* Locate the rule's position in the VL Lookup table; the status
	 * registers are addressed by that index.
	 */
	vlid = sja1105_find_vlid(priv, port, &rule->key);
	if (vlid < 0)
		return 0;

	/* NOTE(review): stride of 2 register words per VL status record —
	 * confirm against the regs->vl_status layout.
	 */
	rc = sja1105_xfer_buf(priv, SPI_READ, regs->vl_status + 2 * vlid, buf,
			      SJA1105_SIZE_VL_STATUS);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "SPI access failed");
		return rc;
	}

	/* Unpack the three 16-bit drop counters from the 8-byte record */
	sja1105_unpack(buf, &timingerr, 31, 16, SJA1105_SIZE_VL_STATUS);
	sja1105_unpack(buf, &unreleased, 15, 0, SJA1105_SIZE_VL_STATUS);
	sja1105_unpack(buf, &lengtherr, 47, 32, SJA1105_SIZE_VL_STATUS);

	pkts = timingerr + unreleased + lengtherr;

	/* Report only the increment since the last call */
	flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
			  jiffies - rule->vl.stats.lastused,
			  FLOW_ACTION_HW_STATS_IMMEDIATE);

	rule->vl.stats.pkts = pkts;
	rule->vl.stats.lastused = jiffies;

	return 0;
}